def derive_from_dem(dem):
    """derive slope and flow direction from a DEM.
    Results are returned in a dictionary that contains references to
    ArcPy Raster objects stored in the "in_memory" (temporary) workspace
    """

    # set the snap raster for subsequent operations
    env.snapRaster = dem

    # calculate flow direction for the whole DEM
    flowdir = FlowDirection(in_surface_raster=dem, force_flow="NORMAL")
    flow_direction_raster = so("flowdir", "random", "in_memory")
    flowdir.save(flow_direction_raster)

    # calculate slope for the whole DEM
    slope = Slope(in_raster=dem,
                  output_measurement="PERCENT_RISE",
                  method="PLANAR")
    slope_raster = so("slope", "random", "in_memory")
    slope.save(slope_raster)

    return {
        "flow_direction_raster": Raster(flow_direction_raster),
        "slope_raster": Raster(slope_raster),
    }
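

# Usage sketch (illustrative, not part of the original source). Assumes the
# Spatial Analyst extension is checked out; "dem_path" is a hypothetical path.
dem_path = r"C:\data\drainage.gdb\dem_corrected"
derived = derive_from_dem(dem_path)
flow_direction = derived["flow_direction_raster"]  # ArcPy Raster in "in_memory"
slope_pct = derived["slope_raster"]                # slope, in percent rise
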
def calc_catchment_flowlength_max(
        catchment_area_raster,
        zone_value,
        flow_direction_raster,
        length_conv_factor  # factor for converting flow length to the target linear unit
):
    """
    Derives flow length for a *single catchment area using a provided zone
    value (the "Value" column of the catchment_area_raster's attr table).
    
    Inputs:
        catchment_area: *raster* representing the catchment area(s)
        zone_value: an integer from the "Value" column of the
            catchment_area_raster's attr table.
        flow_direction_raster: flow direction raster for the broader
    outputs:
        returns the 
    """
    # use the catchment (watershed) raster to clip the flow direction raster
    # ensure the catchment area input is an ArcPy Raster object
    if not isinstance(catchment_area_raster, Raster):
        c = Raster(catchment_area_raster)
    else:
        c = catchment_area_raster
    # clip the flow direction raster to the catchment area (zone value)
    fd = SetNull(c != zone_value, flow_direction_raster)
    # calculate flow length
    fl = FlowLength(fd, "UPSTREAM")
    # determine maximum flow length
    fl_max = fl.maximum
    # convert the maximum flow length to the target linear unit using
    # length_conv_factor (derived by the caller from the flow direction
    # raster's spatial reference)
    fl_max = fl_max * length_conv_factor

    return fl_max
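

# Usage sketch (illustrative, not part of the original source). Assumes
# "in_memory/catchments" is a hypothetical zone raster whose Value field holds
# catchment IDs, that flow_direction is the Raster returned by derive_from_dem
# above, and that the source data are in feet (0.3048 converts feet to meters).
max_fl_m = calc_catchment_flowlength_max(
    catchment_area_raster="in_memory/catchments",
    zone_value=12,
    flow_direction_raster=flow_direction,
    length_conv_factor=0.3048
)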
Example #3
 def __init__(self, fileIn):
     self.fileIn = Raster(fileIn)
     self.width = self.fileIn.width
     self.height = self.fileIn.height
     self.noData = self.fileIn.noDataValue
     arcpy.env.outputCoordinateSystem = self.fileIn
     arcpy.env.overwriteOutput = True
Example #4
def rasterfile_info(fname, prn=False):
    """Obtain raster stack information from the filename of an image
    :
    """
    #
    frmt = """
    File path   - {}
    Name        - {}
    Spatial Ref - {}
    Raster type - {}
    Integer?    - {}
    NoData      - {}
    Min         - {}
    Max         - {}
    Mean        - {}
    Std dev     - {}
    Bands       - {}
    Cell        - h {}   w {}
    Lower Left  - X {}   Y {}
    Upper Left  - X {}   Y {}
    Extent      - h {}   w {}
    """
    desc = Describe(fname)
    r_data_type = desc.datasetType  # 'RasterDataset'
    args = []
    if r_data_type == 'RasterDataset':
        r = Raster(fname)
        r.catalogPath  # full path name and file name
        pth = r.path  # path only
        name = r.name  # file name
        SR = r.spatialReference
        r_type = r.format  # 'TIFF'
        #
        is_int = r.isInteger
        nodata = r.noDataValue
        r_max = r.maximum
        r_min = r.minimum
        r_mean = "N/A"
        r_std = "N/A"
        if not is_int:
            r_mean = r.mean
            r_std = r.standardDeviation
        bands = r.bandCount
        cell_hght = r.meanCellHeight
        cell_wdth = r.meanCellWidth
        extent = desc.Extent
        LL = extent.lowerLeft  # Point (X, Y, #, #)
        hght = r.height
        wdth = r.width
        UL = r.extent.upperLeft
        args = [
            pth, name, SR.name, r_type, is_int, nodata, r_min, r_max, r_mean,
            r_std, bands, cell_hght, cell_wdth, LL.X, LL.Y, UL.X, UL.Y, hght,
            wdth
        ]
    if prn:
        tweet(dedent(frmt).format(*args))
    else:
        return args
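

# Usage sketch (illustrative, not part of the original source); the TIFF path is hypothetical.
rasterfile_info(r"C:\data\imagery\scene.tif", prn=True)    # print the formatted report
vals = rasterfile_info(r"C:\data\imagery\scene.tif")       # or collect the raw values as a list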
def prep_cn_raster(
    dem,
    curve_number_raster,
    out_cn_raster=None,
    out_coor_system="PROJCS['NAD_1983_StatePlane_Pennsylvania_South_FIPS_3702_Feet',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Lambert_Conformal_Conic'],PARAMETER['False_Easting',1968500.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-77.75],PARAMETER['Standard_Parallel_1',39.93333333333333],PARAMETER['Standard_Parallel_2',40.96666666666667],PARAMETER['Latitude_Of_Origin',39.33333333333334],UNIT['Foot_US',0.3048006096012192]]"
):
    """
    Clip, reproject, and resample the curve number raster to match the DEM.
    Ensure everything utilizes the DEM as the snap raster.
    The result is returned in a dictionary referencing an ArcPy Raster object
    for the file gdb location of the processed curve number raster.
    
    For any given study area, this will only need to be run once.
    """

    # make the DEM an ArcPy Raster object, so we can get the raster properties
    if not isinstance(dem, Raster):
        dem = Raster(dem)

    msg("Clipping...")
    # clip the curve number raster, since it is likely for a broader study area
    clipped_cn = so("cn_clipped")
    Clip_management(in_raster=curve_number_raster,
                    out_raster=clipped_cn,
                    in_template_dataset=dem,
                    clipping_geometry="NONE",
                    maintain_clipping_extent="NO_MAINTAIN_EXTENT")

    # set the snap raster for subsequent operations
    env.snapRaster = dem

    # reproject and resample the curve number raster to match the DEM
    if not out_cn_raster:
        prepped_cn = so("cn_prepped")
    else:
        prepped_cn = out_cn_raster
    msg("Projecting and Resampling...")
    ProjectRaster_management(in_raster=clipped_cn,
                             out_raster=prepped_cn,
                             out_coor_system=out_coor_system,
                             resampling_type="NEAREST",
                             cell_size=dem.meanCellWidth)

    return {"curve_number_raster": Raster(prepped_cn)}
 def _try_get_raster_data(self, image, bands) -> RasterData:
     from arcpy import Raster, RasterToNumPyArray
     raster = Raster(image)
     img_data = RasterToNumPyArray(raster)
     if len(img_data.shape) == 3:
         img_data = img_data.transpose((1, 2, 0)).squeeze()
         if bands:
             img_data = img_data[:, :, bands].squeeze()
     extent = self._get_extent(raster)
     return RasterData(img_data=img_data,
                       extent=extent,
                       id_=str(hash(raster.extent.JSON + raster.name)))
Example #7
 def testLZWCompression(self):
     with TempDir() as d:
         arcpy.ImportToolbox(config.pyt_file)
         arcpy.multiplescales_btm(self.in_raster, self.nbh_sizes,
                                  self.metrics, d)
         rast_names = [
             'bathy5m_clip_mean_003.tif', 'bathy5m_clip_sdev_003.tif',
             'bathy5m_clip_var_003.tif', 'bathy5m_clip_vrm_003.tif',
             'bathy5m_clip_mean_013.tif', 'bathy5m_clip_sdev_013.tif',
             'bathy5m_clip_var_013.tif', 'bathy5m_clip_vrm_013.tif',
             'bathy5m_clip_iqr_003.tif', 'bathy5m_clip_kurt_003.tif',
             'bathy5m_clip_iqr_013.tif', 'bathy5m_clip_kurt_013.tif'
         ]
         for each in rast_names:
             file_name = os.path.join(d, each)
             self.assertEqual(str(Raster(file_name).compressionType), 'LZW')
Example #8
# numpy print formatter; the head of this assignment was truncated in the
# source snippet, so it is reconstructed minimally here
ft = {'float_kind': '{: 0.3f}'.format}
np.set_printoptions(edgeitems=5, linewidth=80, precision=2, suppress=True,
                    threshold=100, formatter=ft)
np.ma.masked_print_option.set_display('-')  # change to a single -

script = sys.argv[0]  # print this should you need to locate the script

env.overwriteOutput = True

# ----------------------------------------------------------------------
# .... final code section producing the featureclass and extendtable
if len(sys.argv) == 1:
    testing = True
    pth = script.split("/")[:-2]
    pth0 = "/".join(pth) + "/Data/r00.tif"
    r = Raster(pth0)
    out_arr = "/".join(pth) + "/Data/r01.npy"
    frmt = "Result...\n{}"
#    print(frmt.format(a))
else:
    testing = False
    pth = sys.argv[1]
    out_arr = sys.argv[2]
    r = Raster(pth)
# parameters here
LL = r.extent.lowerLeft
cols = int(r.extent.width/r.meanCellWidth)
rows = int(r.extent.height/r.meanCellWidth)
a = RasterToNumPyArray(r,
                       lower_left_corner=Point(LL.X, LL.Y),
                       ncols=cols,
                       nrows=rows)
def derive_data_from_catchments(catchment_areas,
                                flow_direction_raster,
                                slope_raster,
                                curve_number_raster,
                                area_conv_factor=0.00000009290304,
                                length_conv_factor=1,
                                out_catchment_polygons=None):
    """
    For tools that handle multiple inputs quickly, we execute here (e.g., zonal
    stats). For those we need to run on individual catchments, this parses the
    catchments raster and passes individual catchments, along with other required 
    data, to the calc_catchment_flowlength_max function.

    area_conversion_factor: for converting the area of the catchments to Sq. Km, which is 
        expected by the core business logic. By default, the factor converts from square feet 
    out_catchment_polygons: will optionally return a catchment polygon feature class.

    Output: an array of records containing info about each inlet's catchment, e.g.:
        [
            {
                "id": <ID value from pour_point_field (spec'd in catchment_delineation func)> 
                "area_sqkm": <area of inlet's catchment in square km>
                "avg_slope": <average slope of DEM in catchment>
                "avg_cn": <average curve number in the catchment>
                "max_fl": <maximum flow length in the catchment>
            },
            {...},
            ...
         ]
    """
    raster_field = "Value"

    # store the results, keyed by a catchment ID (int) that comes from the
    # catchments layer gridcode
    results = {}

    # make a raster object with the catchment raster
    if not isinstance(catchment_areas, Raster):
        c = Raster(catchment_areas)
    else:
        c = catchment_areas
    # if the catchment raster does not have an attribute table, build one
    if not c.hasRAT:
        BuildRasterAttributeTable_management(c, "Overwrite")

    # make a table view of the catchment raster
    catchment_table = 'catchment_table'
    MakeTableView_management(
        c, catchment_table)  #, {where_clause}, {workspace}, {field_info})

    # calculate flow length for each zone. Zones must be isolated as individual
    # rasters for this to work. We handle that with calc_catchment_flowlength_max()
    # using the table to get the zone values...
    catchment_count = int(GetCount_management(catchment_table).getOutput(0))
    with SearchCursor(catchment_table, [raster_field]) as catchments:

        # TODO: implement multi-processing for this loop.

        ResetProgressor()
        SetProgressor('step', "Mapping flow length for catchments", 0,
                      catchment_count, 1)
        # msg("Mapping flow length for catchments")

        for idx, each in enumerate(catchments):
            this_id = each[0]
            # msg("{0}".format(this_id))
            # calculate flow length for each "zone" in the raster
            fl_max = calc_catchment_flowlength_max(catchment_areas, this_id,
                                                   flow_direction_raster,
                                                   length_conv_factor)
            if this_id in results.keys():
                results[this_id]["max_fl"] = clean(fl_max)
            else:
                results[this_id] = {"max_fl": clean(fl_max)}
            SetProgressorPosition()
        ResetProgressor()

    # calculate average curve number within each catchment for all catchments
    table_cns = so("cn_zs_table", "timestamp", "fgdb")
    msg("CN Table: {0}".format(table_cns))
    ZonalStatisticsAsTable(catchment_areas, raster_field, curve_number_raster,
                           table_cns, "DATA", "MEAN")
    # push table into results object
    with SearchCursor(table_cns, [raster_field, "MEAN"]) as c:
        for r in c:
            this_id = r[0]
            this_cn = r[1]
            if this_id in results.keys():
                results[this_id]["avg_cn"] = clean(this_cn)
            else:
                results[this_id] = {"avg_cn": clean(this_cn)}

    # calculate average slope within each catchment for all catchments
    table_slopes = so("slopes_zs_table", "timestamp", "fgdb")
    msg("Slopes Table: {0}".format(table_slopes))
    ZonalStatisticsAsTable(catchment_areas, raster_field, slope_raster,
                           table_slopes, "DATA", "MEAN")
    # push table into results object
    with SearchCursor(table_slopes, [raster_field, "MEAN"]) as c:
        for r in c:
            this_id = r[0]
            this_slope = r[1]
            if this_id in results.keys():
                results[this_id]["avg_slope"] = clean(this_slope)
            else:
                results[this_id] = {"avg_slope": clean(this_slope)}

    # calculate area of each catchment
    # ZonalGeometryAsTable(catchment_areas, "Value", "output_table")  # crashes, so area is computed from the dissolved polygons below
    cp = so("catchmentpolygons", "timestamp", "in_memory")
    #RasterToPolygon copies our ids from raster_field into "gridcode"
    RasterToPolygon_conversion(catchment_areas, cp, "NO_SIMPLIFY",
                               raster_field)

    # Dissolve the converted polygons, since some of the raster zones may have corner-corner links
    if not out_catchment_polygons:
        cpd = so("catchmentpolygonsdissolved", "timestamp", "in_memory")
    else:
        cpd = out_catchment_polygons
    Dissolve_management(in_features=cp,
                        out_feature_class=cpd,
                        dissolve_field="gridcode",
                        multi_part="MULTI_PART")

    # get the area for each record, and push into results object
    with SearchCursor(cpd, ["gridcode", "SHAPE@AREA"]) as c:
        for r in c:
            this_id = r[0]
            this_area = r[1] * area_conv_factor
            if this_id in results.keys():
                results[this_id]["area_up"] = clean(this_area)
            else:
                results[this_id] = {"area_up": clean(this_area)}

    # flip results object into a records-style array of dictionaries
    # (this makes conversion to table later on simpler)
    # msg(results,"warning")
    records = []
    for k in results.keys():
        record = {
            "area_up": 0,
            "avg_slope": 0,
            "max_fl": 0,
            "avg_cn": 0,
            "tc_hr": 0
        }
        for each_result in record.keys():
            if each_result in results[k].keys():
                record[each_result] = results[k][each_result]
        record["id"] = k
        records.append(record)

    if out_catchment_polygons:
        return records, cpd
    else:
        return records, None
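

# Usage sketch (illustrative, not part of the original source). The rasters are
# hypothetical; the factors below assume source data in feet (sq ft -> sq km,
# feet -> meters).
records, catchment_polys = derive_data_from_catchments(
    catchment_areas="in_memory/catchments",
    flow_direction_raster="in_memory/flowdir",
    slope_raster="in_memory/slope",
    curve_number_raster="in_memory/cn",
    area_conv_factor=0.00000009290304,
    length_conv_factor=0.3048,
    out_catchment_polygons=None
)
for rec in records:
    msg("{id}: area={area_up} sq km, max_fl={max_fl} m".format(**rec))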
def build_cn_raster(landcover_raster,
                    lookup_csv,
                    soils_polygon,
                    soils_hydrogroup_field="SOIL_HYDRO",
                    reference_raster=None,
                    out_cn_raster=None):
    """Build a curve number raster from landcover raster, soils polygon, and a crosswalk between 
    landcover classes, soil hydro groups, and curve numbers.

    :param lookup_csv: [description]
    :type lookup_csv: [type]
    :param landcover_raster: [description]
    :type landcover_raster: [type]
    :param soils_polygon: polygon containing soils with a hydro classification. 
    :type soils_polygon: [type]
    :param soils_hydrogroup_field: [description], defaults to "SOIL_HYDRO" (from the NCRS soils dataset)
    :type soils_hydrogroup_field: str, optional
    :param out_cn_raster: [description]
    :type out_cn_raster: [type]    
    """

    # GP Environment ----------------------------
    msg("Setting up GP Environment...")
    # if reference_raster is provided, we use it to set the GP environment for
    # subsequent raster operations
    if reference_raster:
        if not isinstance(reference_raster, Raster):
            # read in the reference raster as a Raster object.
            reference_raster = Raster(reference_raster)
    else:
        reference_raster = Raster(landcover_raster)

    # set the snap raster, cell size, extent, and coordinate system for subsequent operations
    env.snapRaster = reference_raster
    env.cellSize = reference_raster.meanCellWidth
    env.extent = reference_raster
    env.outputCoordinateSystem = reference_raster

    cs = env.outputCoordinateSystem.exportToString()

    # SOILS -------------------------------------

    msg("Processing Soils...")
    # convert the soils polygon to a raster, keyed on the hydro group field
    soils_raster_path = so("soils_raster")
    PolygonToRaster_conversion(soils_polygon, soils_hydrogroup_field,
                               soils_raster_path, "CELL_CENTER")
    soils_raster = Raster(soils_raster_path)

    # use the raster attribute table to build a lookup of raster values to soil hydro codes
    # from the polygon (that were stored in the raster attribute table after conversion)
    if not soils_raster.hasRAT:
        msg("Soils raster does not have an attribute table. Building...",
            "warning")
        BuildRasterAttributeTable_management(soils_raster, "Overwrite")
    # build a 2D array from the RAT
    fields = ["Value", soils_hydrogroup_field]
    rows = [fields]
    # soils_raster_table = MakeTableView_management(soils_raster_path)
    with SearchCursor(soils_raster_path, fields) as sc:
        for row in sc:
            rows.append([row[0], row[1]])
    # turn that into a dictionary, where the key==soil hydro text and value==the raster cell value
    lookup_from_soils = {v: k for k, v in etl.records(rows)}
    # also capture a list of just the values, used to iterate conditionals later
    soil_values = [v['Value'] for v in etl.records(rows)]

    # LANDCOVER ---------------------------------
    msg("Processing Landcover...")
    if not isinstance(landcover_raster, Raster):
        # read in the landcover raster as a Raster object.
        landcover_raster_obj = Raster(landcover_raster)
    else:
        landcover_raster_obj = landcover_raster
    landcover_values = []
    with SearchCursor(landcover_raster, ["Value"]) as sc:
        for row in sc:
            landcover_values.append(row[0])

    # LOOKUP TABLE ------------------------------
    msg("Processing Lookup Table...")
    # read the lookup csv, clean it up, and use the lookups from above to limit it to just
    # those values in the rasters
    t = etl\
        .fromcsv(lookup_csv)\
        .convert('utc', int)\
        .convert('cn', int)\
        .select('soil', lambda v: v in lookup_from_soils.keys())\
        .convert('soil', lookup_from_soils)\
        .select('utc', lambda v: v in landcover_values)

    # This gets us a table where each landcover class (as a number) is paired with
    # the matching value in the converted soils raster and its curve number.

    # DETERMINE CURVE NUMBERS -------------------
    msg("Assigning Curve Numbers...")
    # Use that to reassign cell values using conditional map algebra operations
    cn_rasters = []
    for rec in etl.records(t):
        cn_raster_component = Con(
            (landcover_raster_obj == rec.utc) & (soils_raster == rec.soil),
            rec.cn, 0)
        cn_rasters.append(cn_raster_component)

    cn_raster = CellStatistics(cn_rasters, "MAXIMUM")

    # REPROJECT THE RESULTS -------------------
    msg("Reprojecting and saving the results....")
    if not out_cn_raster:
        out_cn_raster = so("cn_raster", "random", "in_memory")

    ProjectRaster_management(in_raster=cn_raster,
                             out_raster=out_cn_raster,
                             out_coor_system=cs,
                             resampling_type="NEAREST",
                             cell_size=env.cellSize)

    # cn_raster.save(out_cn_raster)
    return out_cn_raster
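

# Usage sketch (illustrative, not part of the original source). Paths are hypothetical;
# the CSV is assumed to have 'utc' (landcover class), 'soil' (hydro group), and 'cn'
# columns, as implied by the petl conversions above.
cn_path = build_cn_raster(
    landcover_raster=r"C:\data\drainage.gdb\landcover",
    lookup_csv=r"C:\data\cn_lookup.csv",
    soils_polygon=r"C:\data\drainage.gdb\soils",
    soils_hydrogroup_field="SOIL_HYDRO",
    out_cn_raster=r"C:\data\drainage.gdb\cn_raster"
)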
Example #11
# bytes per pixel for ArcPy pixel-type codes; the head of this dictionary was
# truncated in the source snippet, so the values below are a reconstruction
PIXELTYPE = {
    'U8': 1, 'S8': 1, 'U16': 2, 'S16': 2,
    'U32': 4, 'S32': 4, 'F32': 4, 'F64': 8
}


def size_in_mem(size, type, unit='MB'):

    if isinstance(type, np.dtype):
        type = str(type)

    units = {'kB': 1000, 'MB': 1000000, 'GB': 1000000000}
    return (size * PIXELTYPE[type] / units[unit])


inraster = r"C:\Data\Staging\Output\MLVMI_puusto\ositteet\PUULAJI_1_OSITE_1_lpm.img"
ws = os.path.dirname(inraster)

raster = Raster(inraster)
raster_size = raster.width * raster.height

print('Raster shape: %s x %s' % (raster.height, raster.width))
print('Raster size: %s' % (raster_size))
print('Raster pixel type: %s' % raster.pixelType)
# In reality the uncompressed size is smaller (as seen from ArcCatalog) -> maybe
# NoData doesn't take that much space?
print('Raster uncompressed size: %s MB' %
      (size_in_mem(raster_size, raster.pixelType)))
print('float64 will consume: %s MB' % (size_in_mem(raster_size, 'F64')))

array = RasterToNumPyArray(inraster)
print('Array shape: %s x %s' % (array.shape[0], array.shape[1]))
print('Array size: %s' % array.size)
print('Array dtype: %s' % array.dtype)
Example #12
def main(in_raster=None, areaOfInterest=None, saveTINs=False,
         out_workspace=None):
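    """Calculate ACR (arc-chord ratio) rugosity, slope, and aspect for one or
    more areas of interest over a bathymetry raster, optionally saving the
    elevation and planar (POBF) TINs to out_workspace.
    """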

    if isinstance(saveTINs, str) and saveTINs.lower() == 'false':
        saveTINs = False
    if isinstance(saveTINs, str) and saveTINs.lower() == 'true':
        saveTINs = True

    rastName = os.path.splitext(os.path.split(in_raster)[1])[0]
    bathyRaster = Raster(in_raster)
    cellSize = bathyRaster.meanCellHeight

    with TempDir() as d:
        # Check if multipart polygon and convert to singlepart if true
        with arcpy.da.SearchCursor(areaOfInterest, ["SHAPE@"]) as cursor:
            for row in cursor:
                geometry = row[0]
                if geometry.isMultipart is True:
                    utils.msg("Converting multipart geometry to single parts...")
                    singlepart = os.path.join(d, 'singlepart.shp')
                    arcpy.MultipartToSinglepart_management(areaOfInterest,
                                                           singlepart)
                    arcpy.CopyFeatures_management(singlepart, areaOfInterest)

        # Name temporary files
        elevationTIN = os.path.join(d, 'elevationTIN')
        boundaryBuffer = os.path.join(d, 'bnd_buf.shp')
        boundaryRaster = os.path.join(d, 'bnd_rast.tif')
        boundaryPoints = os.path.join(d, 'bnd_pts.shp')
        pobfRaster = os.path.join(d, 'pobf_rast.tif')

        # Create elevation TIN
        utils.msg("Creating elevation TIN...")
        # just compute statistics
        utils.raster_properties(bathyRaster, attribute=None)
        zTolerance = abs((bathyRaster.maximum - bathyRaster.minimum)/10)
        arcpy.RasterTin_3d(bathyRaster, elevationTIN, str(zTolerance))
        arcpy.EditTin_3d(elevationTIN, ["#", "<None>", "<None>",
                                        "hardclip", "false"])

        # If more than one polygon in areaOfInterest,
        # split into separate files to process
        splitFiles = [areaOfInterest]
        multiple = False
        aoi_count = int(arcpy.GetCount_management(areaOfInterest).getOutput(0))
        if aoi_count > 1:
            multiple = True
            arcpy.AddField_management(areaOfInterest, "Name", "TEXT")
            splitFiles = []
            with arcpy.da.UpdateCursor(areaOfInterest,
                                       "Name") as cursor:
                for (i, row) in enumerate(cursor):
                    row[0] = "poly_{}".format(i)
                    splitFiles.append(r"in_memory\poly_{}".format(i))
                    cursor.updateRow(row)
            arcpy.Split_analysis(areaOfInterest, areaOfInterest,
                                 'Name', 'in_memory')

        # grab an output directory, we may need it if TINs are being saved
        if out_workspace is None or not os.path.exists(out_workspace):
            # get full path for aoi
            aoi_path = arcpy.Describe(areaOfInterest).catalogPath
            out_dir = os.path.split(aoi_path)[0]
        else:
            out_dir = out_workspace

        # Calculate ACR for each polygon
        pobfs = []
        num_polys = len(splitFiles)
        for (i, each) in enumerate(splitFiles, start=1):
            if num_polys == 1:
                acr_msg = "Calculating ACR Rugosity..."
            else:
                acr_msg = ("Calculating ACR Rugosity for Area "
                           "{} of {}...".format(i, num_polys))
            utils.msg(acr_msg)

            # Create POBF TIN
            arcpy.Buffer_analysis(each, boundaryBuffer,
                                  cellSize, "OUTSIDE_ONLY")
            arcpy.Clip_management(in_raster, '#', boundaryRaster,
                                  boundaryBuffer, '#',
                                  'ClippingGeometry', 'NO_MAINTAIN_EXTENT')
            arcpy.RasterToPoint_conversion(boundaryRaster,
                                           boundaryPoints, 'Value')
            arcpy.GlobalPolynomialInterpolation_ga(boundaryPoints, "grid_code",
                                                   "#", pobfRaster, cellSize)
            arcpy.CalculateStatistics_management(pobfRaster)
            if len(splitFiles) == 1:
                basename = '{}_planarTIN'.format(rastName)
            else:
                basename = '{}_planarTIN_{}'.format(rastName, i)
            pobf_temp = os.path.join(d, basename)
            pobf_perm = os.path.join(out_dir, basename)
            pobfs.append((pobf_temp, pobf_perm))

            zTolerance = abs((int(Raster(pobfRaster).maximum) -
                              int(Raster(pobfRaster).minimum))/10)
            arcpy.RasterTin_3d(pobfRaster, pobf_temp, str(zTolerance))
            arcpy.EditTin_3d(pobf_temp, ["#", "<None>", "<None>",
                                         "hardclip", "false"])
            # Calculate Rugosity
            arcpy.PolygonVolume_3d(elevationTIN, each, "<None>",
                                   "BELOW", "Volume1", "Surf_Area")
            arcpy.PolygonVolume_3d(pobf_temp, each, "<None>",
                                   "BELOW", "Volume2", "Plan_Area")
            arcpy.AddField_management(each, "Rugosity", "DOUBLE")
            arcpy.CalculateField_management(each, "Rugosity",
                                            "!Surf_Area! / !Plan_Area!",
                                            "PYTHON_9.3")
            arcpy.DeleteField_management(each, "Volume2;Volume1;Name")
            # Calculate Slope and Aspect
            arcpy.AddField_management(each, "Slope", "DOUBLE")
            arcpy.AddField_management(each, "Aspect", "DOUBLE")
            pobfXSize = Raster(pobfRaster).meanCellWidth
            pobfYSize = Raster(pobfRaster).meanCellHeight
            pobfArray = arcpy.RasterToNumPyArray(pobfRaster,
                                                 None, 3, 3)
            dz_dx = ((pobfArray[0, 2] + 2 * pobfArray[1, 2] +
                      pobfArray[2, 2]) -
                     (pobfArray[0, 0] + 2 * pobfArray[1, 0] +
                      pobfArray[2, 0])) / (8.0 * pobfXSize)
            dz_dy = ((pobfArray[2, 0] + 2 * pobfArray[2, 1] +
                      pobfArray[2, 2]) -
                     (pobfArray[0, 0] + 2 * pobfArray[0, 1] +
                      pobfArray[0, 2])) / (8.0 * pobfYSize)
            raw_aspect = (180 / np.pi) * np.arctan2(dz_dy, -dz_dx)
            if np.equal(dz_dy, dz_dx) and np.equal(dz_dy, 0):
                aspect = -1
            else:
                if np.equal(raw_aspect, 0):
                    aspect = 90
                elif np.equal(raw_aspect, 90):
                    aspect = 0
                elif raw_aspect > 90:
                    aspect = 360.0 - raw_aspect + 90
                else:
                    aspect = 90.0 - raw_aspect
            with arcpy.da.UpdateCursor(each, ["Slope", "Aspect"]) as cursor:
                for rows in cursor:
                    rows[0] = np.arctan(np.sqrt(dz_dx**2 +
                                                dz_dy**2))*(180/np.pi)
                    rows[1] = aspect
                    cursor.updateRow(rows)

        # Merge split files and save to input file location
        if multiple:
            arcpy.Merge_management(splitFiles, areaOfInterest)

        # Save TINs if requested
        if saveTINs:
            utils.msg("Saving elevation and planar TINs to "
                      "{}...".format(out_dir))
            arcpy.CopyTin_3d(elevationTIN,
                             os.path.join(out_dir,
                                          '{}_elevationTIN'.format(rastName)))

            for (pobf_temp, pobf_perm) in pobfs:
                arcpy.CopyTin_3d(pobf_temp, pobf_perm)
Example #13
def main(
    inlets, 
    flow_dir_raster, 
    slope_raster, 
    cn_raster, 
    precip_table_noaa, 
    output, 
    output_catchments=None, 
    pour_point_field=None, 
    input_watershed_raster=None, 
    area_conv_factor=0.00000009290304, 
    length_conv_factor=1,
    output_fields=OUTPUT_FIELDS,
    convert_to_imperial=True
    ):
    """Main controller for running the drainage/peak-flow calculator with geospatial data
    
    Arguments:
        inlets {point feature layer} -- point features representing 
            inlets/catchbasins (i.e., point at which peak flow is being assessed)
        flow_dir_raster {raster layer} -- flow direction raster, derived from 
            a user-corrected DEM for an entire study area
        slope_raster {raster layer} -- a slope raster, derived from an 
            *un-corrected* DEM for an entire study area
        cn_raster {raster layer} -- Curve Number raster, derived using 
            prep_cn_raster() tool
        precip_table_noaa {path to csv} -- precipitation table from NOAA (csv)
        output {path for new point feature class} -- output point features; this is 
            a copy of the original inlets, with peak flow calculations appended
    
    Keyword Arguments:
        pour_point_field {field name} -- <optional> name of field containing unique IDs for
            the inlets feature class. Uses the OID/FID/GUID field (default: {None})
        input_watershed_raster {raster layer} -- <optional> , pre-calculated watershed 
            raster for the study area. If used, the values in each catchment must 
            correspond to values in the *pour_point_field* for the *inlets* (default: {None})
        area_conv_factor {float} -- <optional>  (default: 0.00000009290304)
        output_catchments {path for new polygon feature class} -- <optional>  output polygon 
            features; this is a vectorized version of the delineated watershed(s), with peak flow 
            calculations appended (default: {None})

    Returns:
        a tuple of two paths (strings): [0] = path to the output points and, if 
            specified, [1] = path to the output_catchments

    """

    # -----------------------------------------------------
    # SET ENVIRONMENT VARIABLES
    
    msg('Setting environment parameters...', set_progressor_label=True)
    env_raster = Raster(flow_dir_raster)
    env.snapRaster = env_raster
    env.cellSize = (env_raster.meanCellHeight + env_raster.meanCellWidth) / 2.0
    env.extent = env_raster.extent
    # for i in ListEnvironments():
    #     msg("\t%-31s: %s" % (i, env[i]))

    # -----------------------------------------------------
    # DETERMINE UNITS OF INPUT DATASETS

    msg('Determining units of reference raster dataset...', set_progressor_label=True)
    # get the name of the linear unit from env_raster
    unit_name = env_raster.spatialReference.linearUnitName
    acf, lcf = None, None
    # attempt to auto-detect unit names for use with the Pint package
    if unit_name:
        if 'foot'.upper() in unit_name.upper():
            acf = 1 * units.square_foot
            lcf = 1 * units.foot
            msg("...auto-detected 'feet' from the source data")
        elif 'meter'.upper() in unit_name.upper():
            acf = 1 * (units.meter ** 2)
            lcf = 1 * units.meter
            msg("...auto-detected 'meters' from the source data")
        else:
            msg("Could not determine conversion factor for '{0}'".format(unit_name))
    else:
        msg("Reference raster dataset has no spatial reference information.")
    if acf and lcf:
        # get correct conversion factor for casting units to that required by equations in calc.py
        area_conv_factor = acf.to(units.kilometer ** 2).magnitude #square kilometers
        length_conv_factor = lcf.to(units.meter).magnitude #meters
        msg("Area conversion factor: {0}".format(area_conv_factor))
        msg("Length conversion factor: {0}".format(length_conv_factor))

    # -----------------------------------------------------
    # READ IN THE PRECIP TABLE

    msg('Loading precipitation table...', set_progressor_label=True)
    precip_tab = precip_table_etl_noaa(precip_table=precip_table_noaa)
    precip_tab_1d = precip_tab[0]

    # -----------------------------------------------------
    # PREPARE THE INPUTS/POUR POINTS
    
    msg('Prepping inlets...', set_progressor_label=True)

    if isinstance(inlets, FeatureSet):
        msg("(reading from interactive selection)", set_progressor_label=True)
        print(inlets)
        inlets_fs = so("inlets_featureset")
        inlets.save(inlets_fs)
        inlets = inlets_fs

    CopyFeatures_management(
        in_features=inlets, 
        out_feature_class=output
    )
    inlets_copy = output

    if not pour_point_field:
        i = Describe(inlets)
        if i.hasOID:
            pour_point_field = i.OIDFieldName
        # AddGlobalIDs_management(in_datasets="Inlet_Move10")

    # -----------------------------------------------------
    # DELINEATE WATERSHEDS

    if not input_watershed_raster:
        msg('Delineating catchments from inlets...', set_progressor_label=True)
        catchment_results = catchment_delineation(
            inlets=inlets_copy,
            flow_direction_raster=flow_dir_raster,
            pour_point_field=pour_point_field
        )
        catchment_areas = catchment_results['catchments']
        msg("Analyzing Peak Flow for {0} inlet(s)".format(catchment_results['count']), set_progressor_label=True)
    else:
        catchment_areas = input_watershed_raster

    # -----------------------------------------------------
    # DERIVE CHARACTERISTICS FROM EACH CATCHMENT NEEDED TO CALCULATE PEAK FLOW
    # area, maximum flow length, average slope, average curve number

    msg('Deriving calculation parameters for catchments...', set_progressor_label=True)
    
    catchment_data, catchment_geom = derive_data_from_catchments(
        catchment_areas=catchment_areas,
        flow_direction_raster=flow_dir_raster,
        slope_raster=slope_raster,
        curve_number_raster=cn_raster,
        area_conv_factor=area_conv_factor,
        length_conv_factor=length_conv_factor,
        out_catchment_polygons=output_catchments
    )

    all_results = []

    # -----------------------------------------------------
    # CALCULATE PEAK FLOW FOR EACH CATCHMENT

    SetProgressor('step', 'Analyzing catchments', 0, len(catchment_data),1)
    for idx, each_catchment in enumerate(catchment_data):
        
        msg("\n-----\nAnalyzing {0}".format(each_catchment["id"]))
        for i in each_catchment.items():
            msg("\t%-12s: %s" % (i[0], i[1]))

        # -----------------------------------------------------
        # CALCULATE TIME OF CONCENTRATION (Tc)

        # calculate the t of c parameter for this catchment
        time_of_concentration = calculate_tc(
            max_flow_length=each_catchment['max_fl'], 
            mean_slope=each_catchment['avg_slope'],
        )

        # -----------------------------------------------------
        # CALCULATE PEAK FLOW FOR ALL PRECIP PERIODS        

        # generate peak flow estimates for the catchment for all storm frequencies in QP_HEADER
        peak_flow_ests = calculate_peak_flow(
            catchment_area_sqkm=each_catchment['area_up'], 
            tc_hr=time_of_concentration,
            avg_cn=each_catchment['avg_cn'],
            precip_table=precip_tab_1d,
            qp_header=QP_HEADER
        )

        # -----------------------------------------------------
        # BUILD A RESULT OBJECT
        
        #extend the peak_flow_ests dict with the catchment params dict
        peak_flow_ests.update(each_catchment)
        # update with other metric(s) we've generated
        peak_flow_ests['tc_hr'] = time_of_concentration
        # add in the pour point ID field and value
        peak_flow_ests[pour_point_field] = each_catchment['id']

        # append that to the all_results list
        all_results.append(peak_flow_ests)

        SetProgressorPosition()
    
    ResetProgressor()

    # convert our sequence of Python dicts into a table
    results_table = etl.fromdicts(all_results)

    # -----------------------------------------------------
    # CONVERT OUTPUT UNITS TO IMPERIAL (by default)
    
    # run unit conversions from metric to imperial if convert_to_imperial
    if convert_to_imperial:
        results_table = etl\
            .convert(results_table, 'max_fl', lambda v: (v * units.meter).to(units.feet).magnitude)\
            .convert('area_up', lambda v: (v * (units.kilometer ** 2)).to(units.acre).magnitude)\
            .convert({i: lambda v: (v * units.meter ** 3 / units.second).to(units.feet ** 3 / units.second).magnitude for i in QP_HEADER})


    # that last .convert() handles conversion of all the peak flow per storm frequency values from cubic meters/second to cubic feet/second in one go :)

    # -----------------------------------------------------
    # SAVE TO DISK
    
    # save to a csv
    temp_csv = "{0}.csv".format(so("qp_results", "timestamp", "folder"))
    etl.tocsv(results_table, temp_csv)
    msg("Results csv saved: {0}".format(temp_csv))
    # load into a temporary table
    results_table = load_csv(temp_csv)

    # -----------------------------------------------------
    # JOIN RESULTS TO THE GEODATA

    # join that to a copy of the inlets
    msg("Saving results to pour points layer", set_progressor_label=True)
    
    esri_output_fields = ";".join(output_fields)

    JoinField_management(
        in_data=inlets_copy, 
        in_field=pour_point_field, 
        join_table=results_table, 
        join_field=pour_point_field,
        fields=esri_output_fields
    )
    msg("Output inlets (points) saved\n\t{0}".format(inlets_copy))
    if catchment_geom:
        msg("Saving results to catchment layer", set_progressor_label=True)
        JoinField_management(
            in_data=catchment_geom, 
            in_field='gridcode',
            join_table=inlets_copy, 
            join_field=pour_point_field,
            fields=esri_output_fields
        )
        msg("Output catchments (polygons) saved\n\t{0}".format(catchment_geom))
      
    ResetProgressor()
    
    return inlets_copy, catchment_geom
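

# Usage sketch (illustrative, not part of the original source); all paths are
# hypothetical. Unit conversion factors are auto-detected from the flow direction
# raster's spatial reference when possible.
points_out, catchments_out = main(
    inlets=r"C:\data\drainage.gdb\inlets",
    flow_dir_raster=r"C:\data\drainage.gdb\flowdir",
    slope_raster=r"C:\data\drainage.gdb\slope",
    cn_raster=r"C:\data\drainage.gdb\cn_prepped",
    precip_table_noaa=r"C:\data\noaa_precip.csv",
    output=r"C:\data\drainage.gdb\peakflow_points",
    output_catchments=r"C:\data\drainage.gdb\peakflow_catchments"
)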
Example #14
def channel_head_find(por_file_path, map_file_path, text_file_path,
                      flow_acc_raster, flow_dir_raster, curve_Raster,
                      dem_raster, number_of_uni_point, number_contour, method,
                      flage_plot):
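    """Estimate a channel-head elevation threshold (Ele_Thresh) for each pour
    point: delineate a small basin per point, extract elevation contours,
    fit curves to them, and cluster the contours to locate the threshold.
    """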

    por_point, number_of_por = read_por_point(text_file_path)

    env.workspace = por_file_path
    flow_acc = '%s' % map_file_path + '/' + '%s' % flow_acc_raster
    flow_dir = '%s' % map_file_path + '/' + '%s' % flow_dir_raster
    curve_Raster = '%s' % map_file_path + '/' + '%s' % curve_Raster
    raster = '%s' % map_file_path + '/' + '%s' % dem_raster

    curve_Array = arcpy.RasterToNumPyArray(curve_Raster)

    dsc = arcpy.Describe(curve_Raster)
    X_min = dsc.EXTENT.XMin
    Y_min = dsc.EXTENT.YMin

    X_max = dsc.EXTENT.XMax
    Y_max = dsc.EXTENT.YMax

    dy = dsc.meanCellHeight
    dx = dsc.meanCellWidth

    out_path = por_file_path
    geometry_type = "POINT"
    has_m = "DISABLED"
    has_z = "DISABLED"

    print('Total number of heads', number_of_por)

    Ele_Thresh = np.zeros((number_of_por, 1))

    for p in range(0, number_of_por):

        sys.stdout.write("\r%d-" % p)
        num_curve = 0
        x_data = np.zeros((1, number_of_uni_point))
        y_data = np.zeros((1, number_of_uni_point))
        c_data = np.zeros((1, number_of_uni_point))
        por_data = np.zeros((1, 1))
        org_x_data = np.zeros((1, number_of_uni_point))
        org_y_data = np.zeros((1, number_of_uni_point))
        sorg_x_data = np.zeros((1, number_of_uni_point))
        sorg_y_data = np.zeros((1, number_of_uni_point))

        if flage_plot == 1:
            fig = pl.figure(0)
            ax = fig.add_subplot(111)


##            pl.plot(por_point[p][0],por_point[p][1],'yo')
##            ax.annotate(int(p) , xy=(por_point[p][0], por_point[p][1]))

# making contour lines

        arcpy.env.extent = raster

        out_name = 'por_' + str(p) + '.shp'
        arcpy.CreateFeatureclass_management(out_path, out_name, geometry_type,
                                            "", has_m, has_z,
                                            dsc.SpatialReference)
        cursor = arcpy.da.InsertCursor(out_name, ["SHAPE@"])
        cursor.insertRow(
            [arcpy.Point(float(por_point[p][0]), float(por_point[p][1]))])
        del cursor
        tolerance = 0
        outSnapPour = SnapPourPoint(out_name, flow_acc, tolerance)

        small_basin = Watershed(flow_dir, outSnapPour)

        basin_Array = arcpy.RasterToNumPyArray(small_basin)

        poly_bond = '%s' % por_file_path + '/' + 'bond' + str(p) + '.shp'
        buff_poly_bond = '%s' % por_file_path + '/' + 'bond_buffer' + str(
            p) + '.shp'

        arcpy.RasterToPolygon_conversion(small_basin, poly_bond)
        arcpy.Buffer_analysis(poly_bond, buff_poly_bond, "3 Meters")

        subbasin_B = '%s' % por_file_path + '/subDEM_B_' + str(p) + '.tif'
        arcpy.Clip_management(raster, "", subbasin_B, buff_poly_bond, "",
                              "ClippingGeometry")
        subbasin = Con(
            Raster(subbasin_B) <= por_point[p][2], Raster(subbasin_B))
        subbasin.save('%s' % por_file_path + '/subDEM_' + str(p) + '.tif')

        contour_interval = round(
            (por_point[p][2] - por_point[p][3]) / number_contour, 2)

        arcpy.env.extent = buff_poly_bond

        if contour_interval >= 0.01:
            make_contour(subbasin, 'basin_' + str(p), por_file_path,
                         contour_interval)
            number_of_ID, ID_elevation = read_point_data(
                'basin_' + str(p) + '_point', por_file_path, por_file_path)
        else:
            number_of_ID = 0

        outSnapPour = None
        subbasin = None
        small_basin = None

        if number_of_ID <= 5:
            print(p, '      Small initial or final contour')
        else:
            x_0 = 0
            y_0 = 0
            temp_x = np.zeros((number_of_ID, number_of_uni_point))
            temp_y = np.zeros((number_of_ID, number_of_uni_point))
            sorted_x = np.zeros((number_of_ID, number_of_uni_point))
            sorted_y = np.zeros((number_of_ID, number_of_uni_point))

            for ID in range(0, number_of_ID):
                temp_kapa_1, x_0, y_0, x_p, y_p, x, y = poly_fit(
                    ID, 'basin_' + str(p) + '_point', por_file_path,
                    por_file_path, number_of_uni_point, 0, x_0, y_0)
                for i in range(0, number_of_uni_point):
                    temp_x[ID, i] = x[i]
                    temp_y[ID, i] = y[i]

            sorted_ID_elevation = sorted(ID_elevation,
                                         key=lambda temp_x: temp_x[1],
                                         reverse=True)
            for ID in range(0, number_of_ID):
                org_ID = sorted_ID_elevation[ID][0]
                sorted_x[ID, :] = temp_x[org_ID, :]
                sorted_y[ID, :] = temp_y[org_ID, :]

            org_x, org_y, ID_elevation, number_of_ID = contour_delete(
                sorted_x, sorted_y, sorted_ID_elevation, number_of_ID,
                number_of_uni_point, contour_interval, por_point[p][2],
                por_point[p][3])
            org_x, org_y = contour_direction_fix(org_x, org_y, ID_elevation,
                                                 number_of_ID,
                                                 number_of_uni_point)

            for ID in range(0, number_of_ID):
                org_x_data = np.copy(
                    np.append(org_x_data,
                              org_x[ID, :].reshape(1, number_of_uni_point),
                              axis=0))
                org_y_data = np.copy(
                    np.append(org_y_data,
                              org_y[ID, :].reshape(1, number_of_uni_point),
                              axis=0))

            c = contour_curve(org_x, org_y, curve_Array, number_of_uni_point,
                              number_of_ID, X_min, Y_max, dx, dy)

            for ID in range(0, number_of_ID):
                x_data = np.copy(
                    np.append(x_data,
                              org_x[ID, :].reshape(1, number_of_uni_point),
                              axis=0))
                y_data = np.copy(
                    np.append(y_data,
                              org_y[ID, :].reshape(1, number_of_uni_point),
                              axis=0))
                c_data = np.copy(
                    np.append(c_data,
                              c[ID, :].reshape(1, number_of_uni_point),
                              axis=0))
                por_data = np.copy(
                    np.append(por_data, p * np.ones((1, 1)), axis=0))
                num_curve = num_curve + 1

        for the_file in os.listdir(por_file_path):
            file_path = os.path.join(por_file_path, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception as e:
                print(e)

        x_data = np.delete(x_data, 0, 0)
        y_data = np.delete(y_data, 0, 0)
        c_data = np.delete(c_data, 0, 0)
        por_data = np.delete(por_data, 0, 0)

        org_x_data = np.delete(org_x_data, 0, 0)
        org_y_data = np.delete(org_y_data, 0, 0)

        sorg_x_data = np.delete(sorg_x_data, 0, 0)
        sorg_y_data = np.delete(sorg_y_data, 0, 0)

        number_of_k = 2
        number_of_ID = x_data.shape[0]
        min_contour = max(int(number_of_ID * 0.1), 2)

        #print '     Total number of contours = ' , num_curve

        if num_curve <= 5:
            print('     Small number of final contours')
        else:

            if flage_plot == 1:
                for ID in range(0, num_curve - 1):
                    pl.plot(org_x_data[ID, :],
                            org_y_data[ID, :],
                            'g',
                            linewidth=2)
                    ax.annotate(round(ID_elevation[ID, 1]),
                                xy=(org_x_data[ID, 0] - 5,
                                    org_y_data[ID, 0] - 5))

            ele_error = np.zeros((number_of_ID, 2))
            for initial_tresh in range(0, number_of_ID):
                initial_cluster_code = np.zeros((1, number_of_ID))

                for ID in range(0, number_of_ID):
                    if ID > initial_tresh:
                        initial_cluster_code[0][ID] = 1
                if method == 'contour_cluster':
                    x_ave, y_ave = cluster_average(initial_cluster_code,
                                                   x_data, y_data, number_of_k,
                                                   number_of_ID,
                                                   number_of_uni_point)
                    err = cluster_performance(x_data, y_data,
                                              initial_cluster_code, x_ave,
                                              y_ave, number_of_k, number_of_ID,
                                              number_of_uni_point)
                    ele_error[initial_tresh, :] = [
                        ID_elevation[initial_tresh, 1], err
                    ]

            if flage_plot == 1:
                fig = pl.figure(1)
                pl.plot(ele_error[:, 0], ele_error[:, 1], 'ro-')
                for i in range(0, ele_error.shape[0]):
                    print(ele_error[i, 0], ele_error[i, 1])

            length_error = find_local_min(ele_error)

            ele_tresh = 0
            count = 0
            for i in range(0, number_of_ID):
                if length_error[i, 0] > number_of_ID * 0.1:
                    ##                    ele_tresh = ele_tresh + ID_elevation[i , 1] * (length_error[i , 0] / length_error[i , 1]) ** 2
                    ##                    count = count + (length_error[i , 0] / length_error[i , 1]) ** 2
                    ele_tresh = ele_tresh + ID_elevation[i, 1] * length_error[
                        i, 0]
                    count = count + length_error[i, 0]

            if count == 0:
                print(p, 'no dominant cluster found')
            else:
                ele_tresh = ele_tresh / count

                ## print p , ele_tresh , count

                Ele_Thresh[int(por_data[ID, 0]), 0] = ele_tresh