Code example #1
def calc_topo(dem_path):
    """
    Calculates slope and aspect from given DEM and saves output.
    The function checks whether the slope/aspect files already exist, to avoid needless reprocessing.
    
    Parameters:
    dem_path (pathlib.PosixPath): The relative or absolute path to an input DEM file.

    Dependencies: 
    richdem module
    GDAL binaries
    pathlib module
    """
    slope_path = Path(
        str(dem_path).replace("dem", "slope"))
    aspect_path = Path(
        str(dem_path).replace("dem", "aspect"))

    if ((not slope_path.is_file()) or 
            (not aspect_path.is_file())):
        
        dem = rd.LoadGDAL(str(dem_path))

    if not slope_path.is_file():
        slope = rd.TerrainAttribute(
            dem, attrib='slope_riserun')
        rd.SaveGDAL(str(slope_path), slope)
    
    if not aspect_path.is_file():
        aspect = rd.TerrainAttribute(dem, attrib='aspect')
        rd.SaveGDAL(str(aspect_path), aspect)
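A minimal usage sketch for calc_topo, assuming the imports below precede the function definition; the DEM path is hypothetical, and the filename must contain "dem" for the output naming to work:

from pathlib import Path

import richdem as rd

calc_topo(Path("data/site_dem.tif"))
# writes data/site_slope.tif and data/site_aspect.tif alongside the input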
Code example #2
def create_slope_aspect(input_file):
    arr = rd.LoadGDAL(input_file,
                      no_data=-9999)  #rd.rdarray(input_file,no_data=-9999)
    aspect = rd.TerrainAttribute(arr, attrib='aspect')
    slope = rd.TerrainAttribute(arr, attrib='slope_radians')
    aspect_output = input_file[:-4] + '_aspect.tif'
    slope_output = input_file[:-4] + '_slope.tif'
    rd.SaveGDAL(aspect_output, aspect)
    rd.SaveGDAL(slope_output, slope)
    return aspect_output, slope_output
Code example #3
File: cli.py Project: xyt556/richdem
def FlowAccumulation():
  parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description="""RichDEM Flow Accumulation

A variety of methods are available.

Method            Note                           Reference
Tarboton          Alias for Dinf.
Dinf              Alias for Tarboton.
Quinn             Holmgren with exponent=1.
Holmgren(E)       Generalization of Quinn.
Freeman(E)        TODO
FairfieldLeymarie Alias for Rho8.
Rho8              Alias for FairfieldLeymarie.
OCallaghan        Alias for D8.                  10.1016/S0734-189X(84)80011-0
D8                Alias for OCallaghan.          10.1016/S0734-189X(84)80011-0

Methods marked (E) require the exponent argument.
""")
  parser.add_argument('dem',              type=str,                help='Elevation model')
  parser.add_argument('outname',          type=str,                help='Name of output file')
  parser.add_argument('-m', '--method',   type=str, required=True, help='Flow accumulation method to use')
  parser.add_argument('-e', '--exponent', type=float,              help='Some methods require an exponent')
  parser.add_argument('-v', '--version',  action='version', version=rd._RichDEMVersion())
  args = parser.parse_args()

  dem = rd.LoadGDAL(args.dem)
  rd._AddAnalysis(dem, ' '.join(sys.argv))
  accum = rd.FlowAccumulation(dem, method=args.method, exponent=args.exponent)
  rd.SaveGDAL(args.outname, accum)
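The same workflow through the richdem Python API instead of the CLI; a sketch with hypothetical file names (Holmgren is one of the methods marked (E) above, so it needs the exponent argument):

import richdem as rd

dem = rd.LoadGDAL("dem.tif")
accum = rd.FlowAccumulation(dem, method="Holmgren", exponent=5.0)
rd.SaveGDAL("accum.tif", accum)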
Code example #4
def GaussianFilter(in_dem, sigma=1, out_file=None):
    """Applies a Gaussian filter to an image.

    Args:
        in_dem (str): File path to the input image.
        sigma (int, optional): The standard deviation of the Gaussian kernel. Defaults to 1.
        out_file (str, optional): File path to the output image. Defaults to None.

    Returns:
        np.array: The numpy array containing the filtered image.
    """
    print("Gaussian filtering ...")
    start_time = time.time()
    dem = rd.LoadGDAL(in_dem)
    no_data = dem.no_data
    projection = dem.projection
    geotransform = dem.geotransform

    gau = ndimage.gaussian_filter(dem, sigma=sigma)
    gau = np2rdarray(gau, no_data, projection, geotransform)
    print("Run time: {:.4f} seconds".format(time.time() - start_time))

    if out_file is not None:
        print("Saving dem ...")
        rd.SaveGDAL(out_file, gau)
        return out_file

    return gau
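The helper np2rdarray used here and in several later examples is not shown in these snippets; a plausible implementation, inferred from how it is called, wraps a plain NumPy array back into a richdem rdarray and restores the georeferencing metadata:

import richdem as rd

def np2rdarray(in_array, no_data, projection, geotransform):
    # wrap the NumPy array as a richdem rdarray
    out_array = rd.rdarray(in_array, no_data=no_data)
    # reattach the spatial metadata stripped by the scipy/numpy operations
    out_array.projection = projection
    out_array.geotransform = geotransform
    return out_array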
Code example #5
def MeanFilter(in_dem, kernel_size=3, out_file=None):
    """Applies a mean filter to an image.

    Args:
        in_dem (str): File path to the input image.
        kernel_size (int, optional): The size of the moving window. Defaults to 3.
        out_file (str, optional): File path to the output image. Defaults to None.

    Returns:
        np.array: The numpy array containing the filtered image.
    """
    print("Mean filtering ...")
    start_time = time.time()
    dem = rd.LoadGDAL(in_dem)
    no_data = dem.no_data
    projection = dem.projection
    geotransform = dem.geotransform

    weights = np.full((kernel_size, kernel_size),
                      1.0 / (kernel_size * kernel_size))
    mean = ndimage.convolve(dem, weights)
    mean = np2rdarray(mean, no_data, projection, geotransform)
    print("Run time: {:.4f} seconds".format(time.time() - start_time))

    if out_file is not None:
        print("Saving dem ...")
        rd.SaveGDAL(out_file, mean)
        return out_file

    return mean
Code example #6
File: cli.py Project: xyt556/richdem
def TerrainAttribute():
  parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description="""RichDEM Terrain Attribute

A variety of methods are available.

Parameters:
dem      -- An elevation model
attrib   -- Terrain attribute to calculate. (See below.)
zscale   -- How much to scale the z-axis by prior to calculation

Method:
slope_riserun
slope_percentage
slope_degrees
slope_radians
aspect
curvature
planform_curvature
profile_curvature
""")
  parser.add_argument('dem',              type=str,                help='Elevation model')
  parser.add_argument('outname',          type=str,                help='Name of output file')
  parser.add_argument('-a', '--attrib',   type=str, required=True, help='Terrain attribute to calculate')
  parser.add_argument('-z', '--zscale',   type=float, default=1.0, help='Scale elevations by this factor prior to calculation')
  parser.add_argument('-v', '--version',  action='version', version=rd._RichDEMVersion())
  args = parser.parse_args()

  dem = rd.LoadGDAL(args.dem)
  rd._AddAnalysis(dem, ' '.join(sys.argv))
  tattrib = rd.TerrainAttribute(dem, attrib=args.attrib, zscale=args.zscale)
  rd.SaveGDAL(args.outname, tattrib)
Code example #7
File: mounts.py Project: omarseleem92/lidar
def FlipDEM(dem, delta=100, out_file=None):
    """Flips the DEM.

    Args:
        dem (np.array): The numpy array containing the image.
        delta (int, optional): The base value to be added to the flipped DEM. Defaults to 100.
        out_file (str, optional): File path to the output image. Defaults to None.

    Returns:
        np.array: The numpy array containing the flipped DEM.
    """
    # get min and max elevation of the dem
    no_data = dem.no_data
    max_elev = float(np.max(dem[dem != no_data]))
    # min_elev = float(np.min(dem[dem != no_data]))

    dem = dem * (-1) + max_elev + delta
    dem[dem == no_data * (-1)] = no_data

    if out_file is not None:
        print("Saving flipped dem ...")
        rd.SaveGDAL(out_file, dem)
        return out_file

    return dem
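FlipDEM reads dem.no_data, so it expects a richdem array rather than a bare NumPy array; a hedged usage sketch with a hypothetical input path:

import richdem as rd

dem = rd.LoadGDAL("dem.tif")
flipped = FlipDEM(dem, delta=100, out_file="dem_flipped.tif")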
Code example #8
def curvature(input_file):
    """Create a curvature layer (combine profile and planform curvature) using richdem."""
    arr = rd.LoadGDAL(input_file)  #rd.rdarray(input_file,no_data=-9999)
    curvature = rd.TerrainAttribute(arr, attrib='curvature')
    output_file = input_file[:-9] + '_curvature_temp.tif'
    head, tail = os.path.split(output_file)
    rd.SaveGDAL(output_file, curvature)
    createMetadata(
        sys.argv,
        head + '/')  #just remove the filename so you are left with the path
Code example #9
File: cli.py Project: xyt556/richdem
def BreachDepressions():
  parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description="""RichDEM Depression Breaching""")

  parser.add_argument('dem',                    type=str,                help='Elevation model')
  parser.add_argument('outname',                type=str,                help='Name of output file')
  parser.add_argument('-v', '--version',  action='version', version=rd._RichDEMVersion())
  args = parser.parse_args()

  dem = rd.LoadGDAL(args.dem)
  rd._AddAnalysis(dem, ' '.join(sys.argv))
  rd.BreachDepressions(dem)
  rd.SaveGDAL(args.outname, dem)
Code example #10
File: cli.py Project: xyt556/richdem
def DepressionFilling():
  parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description='RichDEM Depression Filling')

  parser.add_argument('dem',     type=str,                     help='Elevation model')
  parser.add_argument('outname', type=str,                     help='Name of output file')
  parser.add_argument('-g', '--gradient', action='store_true', help='Ensure that all cells are at least an epsilon above their downstream cell. This ensures that each cell has a defined flow direction.')
  parser.add_argument('-v', '--version',  action='version', version=rd._RichDEMVersion())
  args = parser.parse_args()

  dem = rd.LoadGDAL(args.dem)
  rd._AddAnalysis(dem, ' '.join(sys.argv))
  rd.FillDepressions(dem, epsilon=args.gradient, in_place=True)
  rd.SaveGDAL(args.outname, dem)
Code example #11
def BreachDepressions():
    parser = argparse.ArgumentParser(
        formatter_class=RawTextHelpFormatter,
        description="""RichDEM Depression Breaching

Modes:
Complete:    Breach everything.
            Ignore max_path_len, max_path_depth.
            There will be no depressions.
            There will be no mercy.
Selective:   Only breach those depressions that can be breached using the
            above criteria.
Constrained: Dig as long a path as necessary, but don't dig it deeper than
            max_path_depth.
""")

    parser.add_argument('dem', type=str, help='Elevation model')
    parser.add_argument('outname', type=str, help='Name of output file')
    parser.add_argument('-m',
                        '--mode',
                        required=True,
                        type=str,
                        help='Breaching mode to use')
    parser.add_argument(
        '-f',
        '--fill',
        action='store_true',
        help="If depressions can't be breached, should they be filled?")
    parser.add_argument('-l',
                        '--max_path_len',
                        type=int,
                        help="Maximum length of breaching path in cells")
    parser.add_argument('-d',
                        '--max_path_depth',
                        type=float,
                        help="Maximum depth of breaching path in z-units")
    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version=rd._RichDEMVersion())
    args = parser.parse_args()

    dem = rd.LoadGDAL(args.dem)
    rd._AddAnalysis(dem, ' '.join(sys.argv))
    rd.BreachDepressions(dem,
                         mode=args.mode,
                         fill=args.fill,
                         max_path_len=args.max_path_len,
                         max_path_depth=args.max_path_depth,
                         in_place=True)
    rd.SaveGDAL(args.outname, dem)
Code example #12
def drain_area(dem, drain_area_out):
    """
    Creates a raster where each pixel represents the contributing
    upstream drainage area in km². The DEM should be in a projected
    coordinate system with cell size in meters.
    PARAMS
    :dem: string - path to dem raster file
    :drain_area_out: string - path to output drainage area raster file
    """

    dem_in = rd.LoadGDAL(dem)
    rd.FillDepressions(dem_in, epsilon=True, in_place=True)
    accum_d8 = rd.FlowAccumulation(dem_in, method='D8')
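    # accum_d8 counts contributing cells; geotransform[1] is the cell width in
    # map units, so cell_width**2 / 1,000,000 converts the count to km2
    # (this assumes the DEM's projected units are meters)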
    da = accum_d8 * (accum_d8.geotransform[1]**2 / 1000000)
    rd.SaveGDAL(drain_area_out, da)

    return
Code example #13
def FlipDEM(dem, delta=100, out_file=None):

    # get min and max elevation of the dem
    no_data = dem.no_data
    max_elev = float(np.max(dem[dem != no_data]))
    # min_elev = float(np.min(dem[dem != no_data]))

    dem = dem * (-1) + max_elev + delta
    dem[dem == no_data * (-1)] = no_data

    if out_file is not None:
        print("Saving flipped dem ...")
        rd.SaveGDAL(out_file, dem)
        return out_file

    return dem
Code example #14
def pendiente(srcFolder="./", dstFolder="./"):
    for archivo in os.listdir(srcFolder):
        if archivo.endswith(".tif"):
            if srcFolder.endswith("/"):
                ruta = srcFolder + archivo
            else:
                ruta = srcFolder + "/" + archivo
            dem = richdem.LoadGDAL(ruta)
            slope = richdem.TerrainAttribute(dem, attrib='slope_radians')
            archivo = "pendiente_" + archivo
            if not os.path.exists(dstFolder):
                os.mkdir(dstFolder)
            if srcFolder.endswith("/"):
                dstRuta = dstFolder + archivo
            else:
                dstRuta = dstFolder + "/" + archivo
            richdem.SaveGDAL(dstRuta, slope)
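A usage sketch for the function above (pendiente is Spanish for "slope"); the folder names are hypothetical:

pendiente(srcFolder="dem_tiles/", dstFolder="slope_tiles/")
# writes slope_tiles/pendiente_<name>.tif for every .tif found in dem_tiles/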
Code example #15
File: test_lidar.py Project: chinxiWang/lidar
    def test_gaussian_filter(self):

        # identify the sample data directory of the package
        package_name = "lidar"
        data_dir = pkg_resources.resource_filename(package_name, "data/")
        print("Sample data directory: {}".format(data_dir))

        # use the sample dem. Change it to your own dem if needed
        in_dem = os.path.join(data_dir, "dem.tif")
        out_dir = os.path.join(os.path.expanduser("~"), "temp")
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)

        gaussian_dem = os.path.join(out_dir, "gaussian.tif")
        gaussian = lidar.GaussianFilter(in_dem, sigma=1)
        rd.SaveGDAL(gaussian_dem, gaussian)

        self.assertTrue(os.path.exists(gaussian_dem))
Code example #16
def weighted_accum(in_dir, weight, out_directory, out_raster, boundary_geom):
    # Calculates flow accumulation weighted by a given array; the
    # commented-out lines below show an earlier pysheds-based approach.
    with rasterio.open(os.path.join(out_directory, weight)) as src:
        weights = src.read(1)
        weights = np.where(weights == -9999, 0, 1)
        norm = np.linalg.norm(weights)
        weights = weights / norm
        #
        # g.accumulation(data='dir', weights=weights, out_name='weights_accum')
        # grid.to_raster('weights_accum', os.path.join(out_directory, out_raster),dtype=np.int32)
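        # `dem` below (and `out_dir` in the clip_to_boundary call) are not
        # defined in this snippet; `dem` is assumed to be a richdem array
        # loaded elsewhere, e.g. via rd.LoadGDAL().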

        accum = rd.FlowAccumulation(dem,
                                    method='D8',
                                    weights=weights.astype('float64'))

        rd.SaveGDAL(os.path.join(out_directory, "temp.tif"), accum)
        clip_to_boundary(out_directory, out_dir, boundary_geom, f"temp.tif",
                         out_raster)
Code example #17
def MedianFilter(in_dem, kernel_size=3, out_file=None):

    print("Median filtering ...")
    start_time = time.time()
    dem = rd.LoadGDAL(in_dem)
    no_data = dem.no_data
    projection = dem.projection
    geotransform = dem.geotransform

    med = ndimage.median_filter(dem, size=kernel_size)
    med = np2rdarray(med, no_data, projection, geotransform)
    print("Run time: {:.4f} seconds".format(time.time() - start_time))

    if out_file is not None:
        print("Saving dem ...")
        rd.SaveGDAL(out_file, med)
        return out_file

    return med
Code example #18
def GaussianFilter(in_dem, sigma=1, out_file=None):

    print("Gaussian filtering ...")
    start_time = time.time()
    dem = rd.LoadGDAL(in_dem)
    no_data = dem.no_data
    projection = dem.projection
    geotransform = dem.geotransform

    gau = ndimage.gaussian_filter(dem, sigma=sigma)
    gau = np2rdarray(gau, no_data, projection, geotransform)
    print("Run time: {:.4f} seconds".format(time.time() - start_time))

    if out_file is not None:
        print("Saving dem ...")
        rd.SaveGDAL(out_file, gau)
        return out_file

    return gau
Code example #19
def MeanFilter(in_dem, kernel_size=3, out_file=None):

    print("Mean filtering ...")
    start_time = time.time()
    dem = rd.LoadGDAL(in_dem)
    no_data = dem.no_data
    projection = dem.projection
    geotransform = dem.geotransform

    weights = np.full((kernel_size, kernel_size),
                      1.0 / (kernel_size * kernel_size))
    mean = ndimage.convolve(dem, weights)
    mean = np2rdarray(mean, no_data, projection, geotransform)
    print("Run time: {:.4f} seconds".format(time.time() - start_time))

    if out_file is not None:
        print("Saving dem ...")
        rd.SaveGDAL(out_file, mean)
        return out_file

    return mean
Code example #20
def add_slope_aspect_curvature(df, file, indexes):
    for attr in ['slope_percentage', 'aspect', 'profile_curvature']:
        table = None
        try:
            table = rd.TerrainAttribute(rd.LoadGDAL(file, no_data=-9999),
                                        attrib=attr)
            rd.SaveGDAL("./temp.tif", table)
            table = None
            table = gr.from_file("./temp.tif")
            for index in indexes:
                try:
                    row = df.loc[index]
                    val = table.map_pixel(row['lon'], row['lat'])
                    df.loc[index, attr] = float(val)
                except:
                    df.loc[index, attr] = np.nan
            os.remove("./temp.tif")
        except:
            for index in indexes:
                df.loc[index, attr] = np.nan
    return df
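A sketch of how this function might be called, assuming gr is the georasters package and df is a pandas DataFrame with lon/lat columns; the coordinates and file name are hypothetical:

import pandas as pd

df = pd.DataFrame({"lon": [136.5, 137.1], "lat": [35.2, 35.6]})
df = add_slope_aspect_curvature(df, "dem.tif", indexes=df.index)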
Code example #21
def ExtractSinks(in_dem, min_size, out_dir):

    start_time = time.time()

    out_dem = os.path.join(out_dir, "dem.tif")
    out_dem_filled = os.path.join(out_dir, "dem_filled.tif")
    out_dem_diff = os.path.join(out_dir, "dem_diff.tif")
    out_sink = os.path.join(out_dir, "sink.tif")
    out_region = os.path.join(out_dir, "region.tif")
    out_depth = os.path.join(out_dir, "depth.tif")
    out_csv_file = os.path.join(out_dir, "depressions_info.csv")
    out_vec_file = os.path.join(out_dir, "depressions.shp")


    # create output folder if nonexistent
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    # load the dem and get dem info
    print("Loading data ...")
    dem = rd.LoadGDAL(in_dem)
    no_data = dem.no_data
    projection = dem.projection
    geotransform = dem.geotransform
    cell_size = geotransform[1]

    # get min and max elevation of the dem
    max_elev = float(np.max(dem))
    min_elev = float(np.min(dem[dem > 0]))
    print("min = {:.2f}, max = {:.2f}, no_data = {}, cell_size = {}".format(min_elev, max_elev, no_data, cell_size))

    # depression filling
    print("Depression filling ...")
    dem_filled = rd.FillDepressions(dem, in_place=False)
    dem_diff = dem_filled - dem
    dem_diff.no_data = 0

    print("Saving filled dem ...")
    rd.SaveGDAL(out_dem_filled, dem_filled)
    rd.SaveGDAL(out_dem_diff, dem_diff)

    # nb_labels is the total number of objects. 0 represents background object.
    print("Region grouping ...")
    label_objects, nb_labels = regionGroup(dem_diff, min_size, no_data)
    # regions = measure.regionprops(label_objects, dem_diff)
    dem_diff[label_objects == 0] = 0
    depth = np2rdarray(dem_diff, no_data=0, projection=projection, geotransform=geotransform)
    rd.SaveGDAL(out_depth, depth)
    del dem_diff, depth

    print("Computing properties ...")
    objects = measure.regionprops(label_objects, dem)
    dep_list = get_dep_props(objects, cell_size)
    write_dep_csv(dep_list, out_csv_file)
    del objects, dep_list

    # convert numpy to richdem data format
    region = np2rdarray(label_objects, no_data=0, projection=projection, geotransform=geotransform)
    del label_objects

    print("Saving sink dem ...")
    sink = np.copy(dem)
    sink[region == 0] = 0
    sink = np2rdarray(sink, no_data=0, projection=projection, geotransform=geotransform)
    rd.SaveGDAL(out_sink, sink)
    # del sink

    print("Saving refined dem ...")
    dem_refined = dem_filled
    dem_refined[region > 0] = dem[region > 0]
    dem_refined = np2rdarray(dem_refined, no_data=no_data, projection=projection, geotransform=geotransform)
    rd.SaveGDAL(out_dem, dem_refined)
    rd.SaveGDAL(out_region, region)
    del dem_refined, region, dem

    print("Converting raster to vector ...")
    polygonize(out_region, out_vec_file)

    # # plot dems
    # demfig = rd.rdShow(dem, ignore_colours=[0], axes=False, cmap='jet', figsize=(8, 5.5))
    # demfig_filled = rd.rdShow(dem_filled, ignore_colours=[0], axes=False, cmap='jet', vmin=demfig['vmin'],
    #                           vmax=demfig['vmax'], figsize=(8, 5.5))
    # demfig_diff = rd.rdShow(dem_diff, ignore_colours=[0], axes=False, cmap='jet', figsize=(8, 5.5))

    end_time = time.time()
    print("Total run time:\t\t\t {:.4f} s".format(end_time - start_time))

    return sink
Code example #22
File: flow.py Project: VVionnet/mesher
import subprocess

import numpy as np
import richdem as rd
from osgeo import gdal, ogr

dem = rd.LoadGDAL("chro_extent_lowRes.tif")
dem = dem.astype(np.float32, copy=False)
#Fill depressions with epsilon gradient to ensure drainage
rd.FillDepressions(dem, epsilon=True, in_place=True)

#Get flow accumulation with no explicit weighting. The default will be 1.
accum = rd.FlowAccumulation(dem, method='Dinf')

# accum[ accum >= 500] = 1
# accum[ accum < 500] = 0
# d8_fig = rd.rdShow(accum, zxmin=450, zxmax=550, zymin=550, zymax=450, figsize=(8,5.5), axes=False, cmap='jet')

rd.SaveGDAL('flow_accumulation.tif', accum)

# tmp_raster = 'd8.tif'
# base_dir=''
# plgs_shp='rivernetwork.shp'
# subprocess.check_call(['gdal_polygonize.py %s -b 1 -mask %s -f "ESRI Shapefile" %s' % (tmp_raster, tmp_raster,
#                                                                                        base_dir +
#                                                                                        plgs_shp)], shell=True)

# exec_string = 'ogr2ogr -overwrite %s %s  -nlt LINESTRING' % ('line_' + plgs_shp, base_dir + plgs_shp)

# # if simplify:
# simplify_tol=50
# exec_string = exec_string + ' -simplify ' + str(simplify_tol)

# subprocess.check_call(exec_string, shell=True)
Code example #23
import richdem as rd

fl = './data/NE/cedar2m/hdr.adf'
outfl = './data/cedar2m_d8_accum.tiff'
dem = rd.LoadGDAL(fl)
rd.FillDepressions(dem, in_place=True)
accum_d8 = rd.FlowAccumulation(dem, method='D8')
rd.SaveGDAL(outfl, accum_d8)
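To inspect the result before saving, rd.rdShow can render the accumulation array, as other examples on this page do; a one-line sketch:

accum_fig = rd.rdShow(accum_d8, axes=False, cmap='jet', figsize=(8, 5.5))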
Code example #24
    try:
        val = table.map_pixel(row['LONWGS84'], row['LATWGS84'])
        table_y.loc[index, 'ELEVATION'] = float(val)
        cnt += 1
    except:
        table_y.loc[index, 'ELEVATION'] = 0
print("Added elevation information to " + repr(cnt) + " instances out of " +
      repr(len(table_y)) + " data instances...")

print("Computing information on terrain slope...")
for attr in ['slope_percentage', 'aspect', 'profile_curvature']:
    table_y[attr.upper()] = 0.0
    table = None
    table = rd.TerrainAttribute(
        rd.LoadGDAL("./globcover/digital-elevation-model.tif"), attrib=attr)
    rd.SaveGDAL("./slope.tif", table)
    table = None
    NDV, xsize, ysize, GeoT, Projection, DataType = gr.get_geo_info(
        "./slope.tif")
    table = gr.from_file("./slope.tif")
    cnt = 0
    for index, row in table_y.iterrows():
        try:
            val = table.map_pixel(row['LONWGS84'], row['LATWGS84'])
            table_y.loc[index, attr.upper()] = float(val)
            cnt += 1
        except:
            table_y.loc[index, attr.upper()] = 0.0
    os.remove("./slope.tif")
    print("Added " + attr + " information to " + repr(cnt) +
          " instances out of " + repr(len(table_y)) + " data instances...")
Code example #25
"""
Created on Sat Nov 21 19:44:49 2020

@author: joempie
"""
import os
from pysheds.grid import Grid
import richdem as rd

filledDemPath = '../data/N09E037.tif'
x, y = 37.15251, 9.55677

if (not os.path.isfile(filledDemPath)):
    dem = rd.LoadGDAL('../data/N09E037.hgt')
    rd.FillDepressions(dem, epsilon=True, in_place=True)
    rd.SaveGDAL(filledDemPath, dem)

grid = Grid.from_raster(filledDemPath, data_name='infl_dem')
# grid.fill_depressions(data='dem',out_name='flooded_dem')
# grid.resolve_flats(data='flooded_dem')

# Specify directional mapping
dirmap = (64, 128, 1, 2, 4, 8, 16, 32)
grid.flowdir(data='infl_dem', out_name='dir', dirmap=dirmap)

# Delineate the catchment
grid.catchment(data='dir',
               x=x,
               y=y,
               dirmap=dirmap,
               out_name='catch')
Code example #26
File: LSR_engine.py Project: SergeiShevyrev/LSR
def engineTopo(in_path='None',out_path='None',shpfilepath='None',drm_filepath='None',\
               products=[],bandStacks=[],is_topocorrection=False,SunElevation=30,\
               SunAzimuth=180,fileext="tif"):
    time_start = time.time()

    #some sample parameters for the topocorrection
    #is_topocorrection=True; #topocorrection flag
    #SunElevation=28.41189977  #31.23944509
    #SunAzimuth=163.93705102      #163.133415

    #Sun Elevation L1	 31.73425917
    #Sun Azimuth L1	162.99110733
    """
    #for 105-029
    #SunElevation=31.23944509 
    #SunAzimuth=163.133415      
    
    """
    if (drm_filepath == 'None'):
        is_topocorrection = False

    SolarZenith = 90 - SunElevation
    """
    You may exclude files from processing by renaming like ".tiff"
    """

    #files for processing, input and output directory
    #pathrowfolder="104_029"
    #datefolder="2015_11_05"
    #imgfilepath=os.path.join("..","Landsat8",pathrowfolder,datefolder);
    #shpfilepath=os.path.join("..","shp",pathrowfolder+".shp");
    #shpfilepath=os.path.join("..","shp","AOI_tmp"+".shp");

    #fileext="tif"; #extention for files
    #outdir=os.path.join("..","Landsat8_Processed",pathrowfolder,datefolder);
    dir_cropped = "cropped_bands_topo"  #dir for AOI cropped
    dir_crop_path = os.path.join(out_path, dir_cropped)
    dir_products = "products_topo"
    dir_products_path = os.path.join(out_path, dir_products)
    band_number_inname = '_b%N%.'  #%N% - for band number, e.g. LC81050292016143LGN00_B6.TIF; NOT CASE-SENSITIVE
    band_number_inname = band_number_inname.lower()
    excl_pfix = '_b8'
    #endfile postfix to exclude from processing

    #drm for topocorrection
    #drm_name="mosaic_dem_south_kuril_utm.tif";
    #drm_folder=os.path.join("..","SRTM","files_for_mosaic");
    #drm_filepath=os.path.join(drm_folder,drm_name);

    #nodata srtm -32768

    #check is file/folder exists
    #print(os.path.isdir("/home/el"))
    #print(os.path.exists("/home/el/myfile.txt"))
    #

    file_for_crop = []

    try:
        for file in os.listdir(in_path):
            #file=file.lower();
            if file.lower().endswith("." + fileext.lower()) and (
                    file.lower().endswith(excl_pfix + '.' +
                                          fileext.lower())) == False:
                file_for_crop.append(file)
                print(file + " was added to crop queue.")
    except (FileNotFoundError):
        print("Input image folder doesn\'t exist...")
    """
    ДОПОЛНЕНИЯ в GUI сделать генерацию AOI shp выделением на изображении, если пользователь не генерирует 
    AOI, то AOI задать по размеру изображения
    """
    #STEP 0. Prepare for the topocorrection

    try:
        shp_extent = get_shp_extent(shpfilepath)
    except:
        print("Can not read shp AOI file. Applying extent from geotiff")
        gdal_object_tmp = gdal.Open(os.path.join(in_path, file_for_crop[0]))
        tmp_xsize = gdal_object_tmp.RasterXSize
        tmp_ysize = gdal_object_tmp.RasterYSize  #x and y raster size in pixels
        tmp_gt = gdal_object_tmp.GetGeoTransform()
        tmp_ext = GetExtent(tmp_gt, tmp_ysize, tmp_xsize)
        shp_extent = [
            tmp_ext[0][0], tmp_ext[2][0], tmp_ext[1][1], tmp_ext[3][1]
        ]

    #crop dem file
    if is_topocorrection == True:
        print("Perform cropping of srtm")

        #read DEM geotiff
        srtm_gdal_object = gdal.Open(drm_filepath)
        srtm_band = srtm_gdal_object.GetRasterBand(1)
        srtm_band_array = srtm_band.ReadAsArray()

        #get spatial resolution
        srtm_gt = srtm_gdal_object.GetGeoTransform()
        srtm_xsize = srtm_gdal_object.RasterXSize
        srtm_ysize = srtm_gdal_object.RasterYSize  #x and y raster size in pixels
        srtm_ext = GetExtent(
            srtm_gt, srtm_ysize,
            srtm_xsize)  #[[ULx,ULy],[LLx,LLy],[LRx,LRy],[URx,URy]]
        #resolution in meters
        srtm_dpx = (srtm_ext[3][0] - srtm_ext[0][0]) / srtm_xsize
        srtm_dpy = (srtm_ext[0][1] - srtm_ext[2][1]) / srtm_ysize
        """
        print("srtm_ext={}".format(srtm_ext))
        print("shp_extent={}".format(shp_extent))
        """
        if check_shp_inside_raster(srtm_ext, shp_extent):
            #        sampleSrtmImage,ColMinIndSRTM,RowMinIndSRTM =crop_by_shp(shp_extent,srtm_ext,\
            #                                                    srtm_dpx,srtm_dpy,srtm_band_array);
            srtm_band = rd.LoadGDAL(drm_filepath)

            slope = rd.TerrainAttribute(srtm_band, attrib='slope_degrees')
            aspect = rd.TerrainAttribute(srtm_band, attrib='aspect')

            rd.SaveGDAL(
                os.path.join(os.path.dirname(drm_filepath),
                             "aspectInitialRes.tif"), aspect)
            rd.SaveGDAL(
                os.path.join(os.path.dirname(drm_filepath),
                             "SlopeInitialRes.tif"), slope)
        else:
            print("AOI shp file" + shpfilepath + "is not inside of DEM" +
                  drm_filepath + ". Stopping.")
            return -1
            #input('Press Enter for exit...')
            #exit;

        #reopening SRTM products
        #read srtm products
        aspect_gdal_object = gdal.Open(
            os.path.join(os.path.dirname(drm_filepath),
                         "aspectInitialRes.tif"))  #aspect
        aspect_band = aspect_gdal_object.GetRasterBand(1)
        aspect_band_array = aspect_band.ReadAsArray()

        slope_gdal_object = gdal.Open(
            os.path.join(os.path.dirname(drm_filepath),
                         "SlopeInitialRes.tif"))  #slope
        slope_band = slope_gdal_object.GetRasterBand(1)
        slope_band_array = slope_band.ReadAsArray()

        #get PRODUCTS spatial resolution
        srtm_gt, srtm_xsize, srtm_ysize, srtm_ext, srtm_dpx, srtm_dpy = getGeotiffParams(
            aspect_gdal_object)

        #check if SRTM products are inside of SHP AOI and crop them
        if check_shp_inside_raster(srtm_ext, shp_extent):
            #do image crop
            aspect_cropped, ColMinInd, RowMinInd = crop_by_shp(
                shp_extent, srtm_ext, srtm_dpx, srtm_dpy, aspect_band_array)
            slope_cropped, ColMinInd, RowMinInd = crop_by_shp(
                shp_extent, srtm_ext, srtm_dpx, srtm_dpy, slope_band_array)

            #for testing purposes
            saveGeoTiff(slope_cropped, 'test_crop_slope.tif',
                        slope_gdal_object, ColMinInd,
                        RowMinInd)  #try to save the cropped geotiff

        else:
            print("SRTM is outside of the AOI, exiting...")
            return -1
            #exit();

    was_corrected = False
    #flag to check if resolution and scale were corrected to landsat8
    #STEP 1. CROP geotiffs one by one with AOI shape file
    print("Step. 1. Starting geotiff crop operation...")
    for myfile in file_for_crop:
        #read geotiff
        gdal_object = gdal.Open(os.path.join(in_path, myfile))
        band = gdal_object.GetRasterBand(1)
        band_array = band.ReadAsArray()

        #get spatial resolution
        #do image crop
        gt, xsize, ysize, ext, dpx, dpy = getGeotiffParams(gdal_object)
        """
        gt=gdal_object.GetGeoTransform()
        xsize = gdal_object.RasterXSize
        ysize = gdal_object.RasterYSize #x and y raster size in pixels
        ext=GetExtent(gt,ysize,xsize) #[[ULx,ULy],[LLx,LLy],[LRx,LRy],[URx,URy]]
        #resolution in meters
        dpx=(ext[3][0]-ext[0][0])/xsize
        dpy=(ext[0][1]-ext[2][1])/ysize
        print(ext)
        """
        #apply shp file
        #try:
        #    shp_extent=get_shp_extent(shpfilepath);
        #except:
        #    print("Can not read shp AOI file.")

        #check shp position inside of tiff
        if check_shp_inside_raster(ext, shp_extent):
            #do image crop
            sampleImage, ColMinInd, RowMinInd = crop_by_shp(
                shp_extent, ext, dpx, dpy, band_array)

        else:
            print("AOI shp file" + shpfilepath + "is not inside of tiff" +
                  myfile + ". Stopping.")
            #input('Press Enter for exit...')
            return -1
            #exit;

        #topocorrection
        if is_topocorrection == True:  #topocorrection flag
            if was_corrected == False:
                print('compute slope and aspect cropped')
                #adjust aspect to Landsat 8
                #adjust srtm resolution to landsat8
                [hlc, wlc] = np.shape(sampleImage)
                aspect_band_cropped = resize(
                    aspect_cropped, (hlc, wlc),
                    preserve_range=True,
                    mode="wrap")  #it works with scikit-image resize

                #adjust slope to Landsat 8
                slope_band_cropped = resize(
                    slope_cropped, (hlc, wlc),
                    preserve_range=True,
                    mode="wrap")  #it works with scikit-image resize


                Cos_i=np.cos(np.deg2rad(slope_band_cropped))*np.cos(np.deg2rad(SolarZenith))+\
                np.sin(np.deg2rad(slope_band_cropped))*np.sin(np.deg2rad(SolarZenith))*\
                np.cos(np.deg2rad(SunAzimuth-aspect_band_cropped))

                #THIS COS(I) CALCULATION TREATS ALL SLOPES AS ILLUMINATED AT A RIGHT ANGLE!
                #Cos_i=np.cos(np.deg2rad(SolarZenith-slope_band_cropped));

                #Do SCS+C correction anyway
                """
                print("Check correlation between Cos(i) and Luminocity")
                R_mat=np.corrcoef(Cos_i.ravel(),sampleImage.ravel()) 
                print("R="+str(R_mat[0,1]));            
                if( R_mat[0,1]<0.5):
                    print("No or weak correlation, use SCS algoritm...");
                    C=0;
                else:
                    print("Not a weak correlation, use SCS+C algoritm...");
                    (b,a)=np.polyfit(Cos_i.ravel(),sampleImage.ravel(),1);
                    C=a/b;
                 """
                (b, a) = np.polyfit(Cos_i.ravel(), sampleImage.ravel(), 1)
                C = a / b
                was_corrected = True
                #switch the flag to true

            print("Performing topographic correction.. Please, WAIT..")
            #Sun-Canopy-Sensor Correction (SCS)+C
            band_array=np.uint16(sampleImage*\
                    ((np.cos(np.deg2rad(SolarZenith))*np.cos(np.deg2rad(slope_band_cropped))+C)\
                     /(C+Cos_i)))
            pic_show(sampleImage, "landsat initial")
            hist_show(sampleImage)
            pic_show(band_array, "landsat SCS corrected")
            hist_show(band_array)
        else:  #no topocorrection
            print("No topocorrection was selected..")
            band_array = copy.copy(sampleImage)
            #no operation

        #check shp position inside of tiff
        #if check_shp_inside_raster(ext,shp_extent):
        #    #do image crop
        #sampleImage,ColMinInd,RowMinInd =crop_by_shp(shp_extent,ext,dpx,dpy,band_array)

        #if is_topocorrection==True: #topocorrection flag
        #    #do topocorrection with SCS Algorithm

        #write image to disk
        print("Writing image to disk")
        outfilename = os.path.join(dir_crop_path, "crop_" + myfile.lower())
        if not os.path.isdir(dir_crop_path):
            os.makedirs(dir_crop_path)  #create output directory if none
        try:
            saveGeoTiff(band_array, outfilename, gdal_object, ColMinInd,
                        RowMinInd)  #save topocorrected Landsat crop
        except:
            print(
                "Can not write on a disk... and/or error(s) in saveGeoTiff function"
            )

    #STEP 2. COMPUTE pseudocolor RGB stacks and satellite indexes
    """
    автоопределение BANDs для дефолтных имен, если пользователь не задал имена (пока что имена по умолчанию), 
    пропускаем индекс или RGB стек, если не находим BAND NUMBER
    """
    print("Step. 2. Getting names of the cropped files...")
    #getting names of the cropped files, acquire band names
    file_for_processing = []
    try:
        for file in os.listdir(
                dir_crop_path
        ):  #collect files from the folder with cropped images
            file = file.lower()
            if file.endswith("." + fileext.lower()):
                file_for_processing.append(file)
                print(file + " was added to the processing queue.")
    except (FileNotFoundError):
        print("Input image folder doesn\'t exist...")

    bands = {}
    #dictionary storing band names
    for myfile in file_for_processing:
        for N in range(1, 9):
            #populating bands dictionary
            if band_number_inname.replace('%n%', str(N), 1) in myfile:
                try:
                    gdal_object = gdal.Open(
                        os.path.join(dir_crop_path, myfile)
                    )  #as new gdal_object was created, no more ColMinInd,RowMinInd
                    bands['band' +
                          str(N)] = gdal_object.GetRasterBand(1).ReadAsArray()
                except:
                    print("Error! Can not read cropped bands!")
    #print("Bands dictionary output:")
    #print(bands)

    try:
        #create RGB stacks:
        #truecolor
        if ('rgb' in bandStacks):
            truecolorRGB = image_stack(bands['band4'],
                                       bands['band3'],
                                       bands['band2'],
                                       do_norm8=0,
                                       do_show=0)

        #Combination 7-4-2. Near-natural-color image; useful for analyzing atmospheric conditions and smoke. Healthy vegetation appears bright green, bright pink areas indicate bare soil, and brown and orange tones are characteristic of sparse vegetation.
        if ('742' in bandStacks):
            b742RGB = image_stack(bands['band7'],
                                  bands['band4'],
                                  bands['band2'],
                                  do_norm8=0,
                                  do_show=0)
        #Combination 5-4-1. Similar to the previous image; useful for analyzing agricultural crops
        if ('652' in bandStacks):
            b652RGB = image_stack(bands['band6'],
                                  bands['band5'],
                                  bands['band2'],
                                  do_norm8=0,
                                  do_show=0)
        #Combination 4-5-3. Clearly distinguishes the boundary between water and land; inland water bodies are detected with high accuracy. Displays vegetation in various shades and tones of brown, green, and orange, enables moisture analysis, and is useful for studying soils and vegetation cover.
        if ('453' in bandStacks):
            b453RGB = image_stack(bands['band4'],
                                  bands['band5'],
                                  bands['band3'],
                                  do_norm8=0,
                                  do_show=0)

        #after Aydal, 2007
        if ('642' in bandStacks):
            b642RGB = image_stack(bands['band6'],
                                  bands['band4'],
                                  bands['band2'],
                                  do_norm8=0,
                                  do_show=0)
        if ('765' in bandStacks):
            b765RGB = image_stack(bands['band7'],
                                  bands['band6'],
                                  bands['band5'],
                                  do_norm8=0,
                                  do_show=0)
        if ('764' in bandStacks):
            b764RGB = image_stack(bands['band7'],
                                  bands['band6'],
                                  bands['band4'],
                                  do_norm8=0,
                                  do_show=0)

        #create indexes
        if ('NDVI' in products):
            NDVI = (bands['band5'] - bands['band4']) / (
                bands['band5'] + bands['band4'])  #NDVI
        if ('IOA' in products) or ('CA' in products):
            IOA = (bands['band4'] / bands['band2']
                   )  #Iron oxide alteration [Doğan Aydal, 2007]
        if ('HA' in products) or ('CA' in products):
            HA = (bands['band7'] / bands['band2']
                  )  #Hydroxyl alteration [Doğan Aydal, 2007]
        if ('CM' in products):
            CM = (bands['band7'] / bands['band6']
                  )  #Clay minerals [Doğan Aydal, 2007]

        #compute PCA
        if ('PC' in products):
            print("Started to compute PCA...")
            print("Flatten image matrix...")
            flattened_img_matrix=mat4pca((bands['band1'],bands['band2'],bands['band3'],\
                                          bands['band4'],bands['band5'],bands['band6'],bands['band7']))
            #mybands=[bands['band1'],bands['band2'],bands['band3'],\
            #                              bands['band4'],bands['band5'],bands['band6'],bands['band7']]
            #tmp_matrix=[mynormalize16to8(tmpband) for tmpband in mybands]
            #flattened_img_matrix=mat4pca(tmp_matrix) #same but images are normalized to uint8
            print("Compute PCA matrix, the variance and the mean...")

            m, n = np.shape(bands['band3'])  #temporary height and width
            (pca, eigenvalues, var_X,
             mean_X) = pca_make(flattened_img_matrix, 7, m, n)

        #create cumulative image composite image of the hydroxyl image(red band), the iron oxide image
        #(green band) and the average of these two images (blue band).
        if ('CA' in products):
            index_composite = image_stack(HA, IOA, (HA + IOA) / 2, 1, 0)

    except:
        print('No bands or bands error!')
        return -1
    #GENERAL OUTPUT
    if ('PC' in products):
        print("Prepare to show PCA images")

        #later incorporate path into functions
        if not os.path.isdir(dir_products_path):
            os.makedirs(
                dir_products_path)  #create output products directory if none

        fig_save_cumsum_path = os.path.join(dir_products_path,
                                            "variance_cumsum.svg")
        fig_save_pca_path = os.path.join(dir_products_path, "pca_comp.png")

        #num_comp=show_pca_cumsum(pca,fig_save_cumsum_path); #pca variance cumsum to determine right number of components
        #show_pca_images(eigenvalues,mean_X,m,n,fig_save_pca_path) #show pca component images

    #COMPUTE Landsat and PCA stat for the CROSTA METHOD
    try:
        stat_bands_save = os.path.join(dir_products_path, "bands_stat.xls")
        cor_bands_save = os.path.join(dir_products_path, "bands_cor_stat.xls")
        cov_bands_pca_save = os.path.join(dir_products_path,
                                          "bands_pca_cov_stat.xls")

        print("Saving band stat to {}".format(stat_bands_save))
        save_landsat_bands_stat(bands, stat_bands_save)

        print("Saving bands mutual correlation to {}".format(cor_bands_save))
        save_landsat_mutual_cor(bands, cor_bands_save)
    except:
        print('can not save band stats')
    try:  #correlation of bands and PCA components may potentially be erroneous, depending on the number of PCA components
        print(
            "Saving covariance between bands and PCA components to {}".format(
                cov_bands_pca_save))
        save_landsat_pca_cov(bands, eigenvalues, cov_bands_pca_save)
    except:
        print('Can not compute/save pca/bands covariance...')

    #save RGB's and index to the disk
    print("Saving products on a disk")
    if not os.path.isdir(dir_products_path):
        os.makedirs(dir_products_path)  #create output directory if none
    try:
        print("Saving RGBs...")
        ColMinInd = 0
        RowMinInd = 0
        #because we work on already cropped pictures
        if ('rgb' in bandStacks):
            saveGeoTiff(
                truecolorRGB,
                os.path.join(dir_products_path, "truecolorRGB" + ".tif"),
                gdal_object, ColMinInd, RowMinInd)
        if ('742' in bandStacks):
            saveGeoTiff(b742RGB,
                        os.path.join(dir_products_path, "b742RGB" + ".tif"),
                        gdal_object, ColMinInd, RowMinInd)
        if ('652' in bandStacks):
            saveGeoTiff(b652RGB,
                        os.path.join(dir_products_path, "b652RGB" + ".tif"),
                        gdal_object, ColMinInd, RowMinInd)
        if ('453' in bandStacks):
            saveGeoTiff(b453RGB,
                        os.path.join(dir_products_path, "b453RGB" + ".tif"),
                        gdal_object, ColMinInd, RowMinInd)
        #Aydal pseudocolor:
        if ('642' in bandStacks):
            saveGeoTiff(b642RGB,
                        os.path.join(dir_products_path, "b642RGB" + ".tif"),
                        gdal_object, ColMinInd, RowMinInd)
        if ('765' in bandStacks):
            saveGeoTiff(b765RGB,
                        os.path.join(dir_products_path, "b765RGB" + ".tif"),
                        gdal_object, ColMinInd, RowMinInd)
        if ('764' in bandStacks):
            saveGeoTiff(b764RGB,
                        os.path.join(dir_products_path, "b764RGB" + ".tif"),
                        gdal_object, ColMinInd, RowMinInd)

        print("Saving Indexes...")
        if ('NDVI' in products):
            saveGeoTiff(NDVI, os.path.join(dir_products_path, "NDVI" + ".tif"),
                        gdal_object, ColMinInd, RowMinInd)
        if ('IOA' in products):
            saveGeoTiff(IOA, os.path.join(dir_products_path, "IOA" + ".tif"),
                        gdal_object, ColMinInd, RowMinInd)
        if ('HA' in products):
            saveGeoTiff(HA, os.path.join(dir_products_path, "HA" + ".tif"),
                        gdal_object, ColMinInd, RowMinInd)
        if ('CM' in products):
            saveGeoTiff(CM, os.path.join(dir_products_path, "CM" + ".tif"),
                        gdal_object, ColMinInd, RowMinInd)
        if ('CA' in products):
            saveGeoTiff(
                index_composite,
                os.path.join(dir_products_path,
                             "CumulativeAlteration" + ".tif"), gdal_object,
                ColMinInd, RowMinInd)

        if ('PC' in products):
            print("Saving PCA components...")
            print("Result for the RANDOMIZED solver")
            for ev in range(0, len(eigenvalues[:, 1, 1])):
                PCAcomp = eigenvalues[ev, :, :].reshape(m, n)
                saveGeoTiff(
                    PCAcomp,
                    os.path.join(dir_products_path,
                                 "PCA{}_".format(ev + 1) + ".tif"),
                    gdal_object, ColMinInd, RowMinInd)

        print("Products data were saved.")
        return 1
    except:
        print(
            "Can not write PRODUCTS on a disk... and/or error(s) in saveGeoTiff function"
        )
        return -1

    print("Operations were finished. It took {} sec".format(time.time() -
                                                            time_start))
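A hedged invocation sketch for engineTopo; every path below is hypothetical, and the solar angles echo the sample values in the function's own comments:

engineTopo(in_path="Landsat8/104_029",
           out_path="Landsat8_Processed/104_029",
           shpfilepath="shp/104_029.shp",
           drm_filepath="SRTM/mosaic_dem.tif",
           products=['NDVI', 'PC'],
           bandStacks=['rgb', '742'],
           is_topocorrection=True,
           SunElevation=31.23944509,
           SunAzimuth=163.133415)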
Code example #27
    med_fig = rd.rdShow(med,
                        ignore_colours=[0],
                        axes=False,
                        cmap="jet",
                        figsize=(6, 5.5))
    gau_fig = rd.rdShow(gau,
                        ignore_colours=[0],
                        axes=False,
                        cmap="jet",
                        figsize=(6, 5.5))
    mean_diff_fig = rd.rdShow(mean_diff,
                              ignore_colours=[0],
                              axes=False,
                              cmap="jet",
                              figsize=(6, 5.5))
    med_diff_fig = rd.rdShow(med_diff,
                             ignore_colours=[0],
                             axes=False,
                             cmap="jet",
                             figsize=(6, 5.5))
    gau_diff_fig = rd.rdShow(gau_diff,
                             ignore_colours=[0],
                             axes=False,
                             cmap="jet",
                             figsize=(6, 5.5))

    # save results
    rd.SaveGDAL(mean_dem, mean)
    rd.SaveGDAL(median_dem, med)
    rd.SaveGDAL(gaussian_dem, gau)
Code example #28
File: filling.py Project: omarseleem92/lidar
def ExtractSinks(in_dem, min_size, out_dir):
    """Extract sinks (e.g., maximum depression extent) from a DEM.

    Args:
        in_dem (str): File path to the input DEM.
        min_size (int): The minimum number of pixels to be considered as a sink.
        out_dir (str): File path to the output directory.

    Returns:
        str: File path to the output sink raster (sink.tif).
    """
    start_time = time.time()

    out_dem = os.path.join(out_dir, "dem.tif")
    out_dem_filled = os.path.join(out_dir, "dem_filled.tif")
    out_dem_diff = os.path.join(out_dir, "dem_diff.tif")
    out_sink = os.path.join(out_dir, "sink.tif")
    out_region = os.path.join(out_dir, "region.tif")
    out_depth = os.path.join(out_dir, "depth.tif")
    out_csv_file = os.path.join(out_dir, "regions_info.csv")
    out_vec_file = os.path.join(out_dir, "regions.shp")

    # create output folder if nonexistent
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    # load the dem and get dem info
    print("Loading data ...")
    dem = rd.LoadGDAL(in_dem)
    no_data = dem.no_data
    projection = dem.projection
    geotransform = dem.geotransform
    cell_size = np.round(geotransform[1], decimals=2)

    # get min and max elevation of the dem
    max_elev = float(np.max(dem))
    min_elev = float(np.min(dem[dem > 0]))
    print("min = {:.2f}, max = {:.2f}, no_data = {}, cell_size = {}".format(
        min_elev, max_elev, no_data, cell_size))

    # depression filling
    print("Depression filling ...")
    dem_filled = rd.FillDepressions(dem, in_place=False)
    dem_diff = dem_filled - dem
    dem_diff.no_data = 0

    print("Saving filled dem ...")
    rd.SaveGDAL(out_dem_filled, dem_filled)
    rd.SaveGDAL(out_dem_diff, dem_diff)

    # nb_labels is the total number of objects. 0 represents background object.
    print("Region grouping ...")
    label_objects, nb_labels = regionGroup(dem_diff, min_size, no_data)
    dem_diff[label_objects == 0] = 0
    depth = np2rdarray(dem_diff,
                       no_data=0,
                       projection=projection,
                       geotransform=geotransform)
    rd.SaveGDAL(out_depth, depth)
    del dem_diff, depth

    print("Computing properties ...")
    # objects = measure.regionprops(label_objects, dem, coordinates='xy')
    objects = measure.regionprops(label_objects, dem)
    dep_list = get_dep_props(objects, cell_size)
    write_dep_csv(dep_list, out_csv_file)
    del objects, dep_list

    # convert numpy to richdem data format
    region = np2rdarray(label_objects,
                        no_data=0,
                        projection=projection,
                        geotransform=geotransform)
    del label_objects

    print("Saving sink dem ...")
    sink = np.copy(dem)
    sink[region == 0] = 0
    sink = np2rdarray(sink,
                      no_data=0,
                      projection=projection,
                      geotransform=geotransform)
    rd.SaveGDAL(out_sink, sink)
    # del sink

    print("Saving refined dem ...")
    dem_refined = dem_filled
    dem_refined[region > 0] = dem[region > 0]
    dem_refined = np2rdarray(dem_refined,
                             no_data=no_data,
                             projection=projection,
                             geotransform=geotransform)
    rd.SaveGDAL(out_dem, dem_refined)
    rd.SaveGDAL(out_region, region)
    del dem_refined, region, dem

    print("Converting raster to vector ...")
    polygonize(out_region, out_vec_file)

    end_time = time.time()
    print("Total run time:\t\t\t {:.4f} s\n".format(end_time - start_time))

    return out_sink
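A usage sketch for ExtractSinks with hypothetical paths; min_size is the minimum depression size in pixels, and the returned value is the path to sink.tif:

sink_path = ExtractSinks("dem.tif", min_size=10, out_dir="output")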
Code example #29
def DelineateDepressions(in_sink, min_size, min_depth, interval, out_dir, bool_level_shp=False):

    # The following parameters can be used by default
    interval = interval * (-1)  # convert slicing interval to negative value

    out_img_dir = os.path.join(out_dir, "img-level")
    out_shp_dir = os.path.join(out_dir, "shp-level")
    out_obj_file = os.path.join(out_dir, "depression_id.tif")
    out_level_file = os.path.join(out_dir, "depression_level.tif")
    out_vec_file = os.path.join(out_dir, "depressions.shp")
    out_csv_file = os.path.join(out_dir, "depressions_info.csv")

    init_time = time.time()

    # delete contents in output folder if existing
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    if os.path.exists(out_img_dir):
        shutil.rmtree(out_img_dir)
    os.mkdir(out_img_dir)
    if os.path.exists(out_shp_dir):
        shutil.rmtree(out_shp_dir)
    os.mkdir(out_shp_dir)

    print("Reading data ...")
    read_time = time.time()

    image = rd.LoadGDAL(in_sink)
    no_data_raw, projection, geotransform, resolution = getMetadata(image)
    rows_cols = image.shape
    print("rows, cols: " + str(rows_cols))
    print("Pixel resolution: " + str(resolution))
    print("Read data time: {:.4f} seconds".format(time.time() - read_time))

    min_elev, max_elev, no_data = get_min_max_nodata(image)  # set nodata value to a large value, e.g., 9999
    # initialize output image
    obj_image = np.zeros(image.shape)  # output depression image with unique id for each nested depression
    level_image = np.zeros(image.shape)  # output depression level image

    # nb_labels is the total number of objects. 0 represents background object.
    label_objects, nb_labels = regionGroup(image, min_size, no_data)
    # regions = measure.regionprops(label_objects, image, coordinates='xy')
    regions = measure.regionprops(label_objects, image)
    del image  # delete the original image to save memory
    prep_time = time.time()
    print("Data preparation time: {:.4f} seconds".format(prep_time - init_time))
    print("Total number of regions: {}".format(nb_labels))

    identify_time = time.time()

    obj_uid = 0
    global_dep_list = []

    # loop through regions and identify nested depressions in each region using level-set method
    for region in regions:  # iterate through each depression region
        region_id = region.label
        img = region.intensity_image  # dem subset for each region
        bbox = region.bbox

        # save all input parameters needed for level set methods as a dict
        image_paras = set_image_paras(no_data, min_size, min_depth, interval, resolution)

        # execute level set methods
        out_obj, dep_list = levelSet(img, region_id, obj_uid, image_paras)

        for dep in dep_list:
            global_dep_list.append(dep)

        obj_uid += len(dep_list)

        level_obj = obj_to_level(out_obj, global_dep_list)
        obj_image = writeObject(obj_image, out_obj, bbox)       # write region to whole image
        level_image = writeObject(level_image, level_obj, bbox)

        del out_obj, level_obj, region

    del regions, label_objects

    print("=========== Run time statistics =========== ")
    print("(rows, cols):\t\t\t {0}".format(str(rows_cols)))
    print("Pixel resolution:\t\t {0} m".format(str(resolution)))
    print("Number of regions:\t\t {0}".format(str(nb_labels)))
    print("Data preparation time:\t\t {:.4f} s".format(prep_time - init_time))
    print("Identify level time:\t\t {:.4f} s".format(time.time() - identify_time))

    write_time = time.time()
    # writeRaster(obj_image, out_obj_file, in_sink)
    # writeRaster(level_image, out_level_file, in_sink)
    # SaveGDAL function can only save data as floating point
    level_image = np2rdarray(np.int32(level_image), no_data_raw, projection, geotransform)
    rd.SaveGDAL(out_level_file, level_image)
    obj_image = np2rdarray(np.int32(obj_image), no_data_raw, projection, geotransform)
    rd.SaveGDAL(out_obj_file, obj_image)
    print("Write image time:\t\t {:.4f} s".format(time.time() - write_time))

    # converting object image to polygon
    level_time = time.time()
    polygonize(out_obj_file, out_vec_file)
    write_dep_csv(global_dep_list, out_csv_file)
    print("Polygonize time:\t\t {:.4f} s".format(time.time() - level_time))

    # extracting polygons for each individual level
    if bool_level_shp:
        level_time = time.time()
        extract_levels(level_image, obj_image, min_size, no_data, out_img_dir, out_shp_dir, in_sink, False)
        print("Extract level time:\t\t {:.4f} s".format(time.time() - level_time))
        shutil.rmtree(out_img_dir)
    else:
        shutil.rmtree(out_shp_dir)
        shutil.rmtree(out_img_dir)
    del level_image
    del obj_image

    end_time = time.time()
    print("Total run time:\t\t\t {:.4f} s".format(end_time - init_time))
    return out_obj_file, out_level_file
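A sketch chaining this function with ExtractSinks from example #28; the parameter values are illustrative:

sink_path = ExtractSinks("dem.tif", min_size=10, out_dir="output")
obj_file, level_file = DelineateDepressions(sink_path,
                                            min_size=10,
                                            min_depth=0.2,
                                            interval=0.1,
                                            out_dir="output",
                                            bool_level_shp=False)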
Code example #30
# =============================
# 3. Calculate slope and aspect

# 3.1. 30" (arc-second) resolution

# Read elevation with rd.LoadGDAL
elevation_raw = rd.LoadGDAL("data/ASTERGDEM3/Processed/Elevation_30s.tif",
                            no_data=-9999)

# Calculate slope
slope = rd.TerrainAttribute(elevation_raw, attrib='slope_riserun')
rd.rdShow(slope, axes=False, cmap='magma', figsize=(8, 5.5))

# Export as tif
rd.SaveGDAL("data/ASTERGDEM3/Processed/Slope_30s.tif", slope)

# Calculate aspect
aspect = rd.TerrainAttribute(elevation_raw, attrib='aspect')
rd.rdShow(aspect, axes=False, cmap='jet', figsize=(8, 5.5))

# Export as tif
rd.SaveGDAL("data/ASTERGDEM3/Processed/Aspect_30s.tif", aspect)

# 3.2. 2.5' (arc-minute) resolution

# Read elevation with rd.LoadGDAL
elevation_raw = rd.LoadGDAL("data/ASTERGDEM3/Processed/Elevation_2p5min.tif",
                            no_data=-9999)

# Calculate slope