Example No. 1
def split_features(in_features, split_features, splitField, out_workplace):
    if not os.path.exists(out_workplace):
        os.mkdir(out_workplace)
    if len(os.listdir(out_workplace)) > 3:
        return
    print("split_features is working...")
    arcpy.Split_analysis(in_features, split_features, splitField, out_workplace)
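A minimal usage sketch for this helper (not part of the original snippet), assuming arcpy is licensed and using hypothetical paths:

import os

import arcpy

# Split the roads layer by county polygons: one output feature class per
# value of the text field "NAME".
split_features(r"C:\data\roads.shp", r"C:\data\counties.shp", "NAME",
               r"C:\data\split_output")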
Example No. 2
def split_features(in_feature, split_feature, split_field, out_workplace):
    path_check(out_workplace)
    if len(os.listdir(out_workplace)) > 3:
        print("{} has already been splitted before.".format(in_feature))
        return
    print("{} is splitting by {} via field name {}".format(
        in_feature, split_feature, str(split_field)))
    arcpy.Split_analysis(in_feature, split_feature, split_field, out_workplace)
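path_check is not defined in this snippet; a plausible minimal version, mirroring the directory check in Example No. 1:

def path_check(path):
    # Create the output workspace if it does not already exist.
    if not os.path.exists(path):
        os.mkdir(path)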
Example No. 3
def main():
    # Raw strings avoid invalid escape sequences such as \U in Windows paths.
    arcpy.env.workspace = r'C:\Users\owner\Downloads\Sample_scripts\ch06'
    inputFeature = (r'C:\Users\owner\Documents\Learning Materials'
                    r'\Johanson - 500 level\PROJECT\adm&gaz\NGA_adm\NGA_adm0.shp')
    splitFeature = (r'C:\Users\owner\Documents\Learning Materials'
                    r'\Johanson - 500 level\PROJECT\adm&gaz\NGA_adm\NGA_adm2.shp')
    splitField = 'NAME_1'
    outputWorkspace = r'C:\Users\owner\Downloads\Sample_scripts\ch06'
    arcpy.Split_analysis(inputFeature, splitFeature, splitField,
                         outputWorkspace)
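The script defines main() but never invokes it; a conventional entry-point guard (an addition, assuming arcpy is imported at module level):

if __name__ == '__main__':
    main()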
Example No. 4
def split_analysis(split_features, radius):
    split_field = "id"
    output_workspace = os.path.normcase(
        os.path.join(OUTPUT_DIR, split_field + str(radius)))
    if not os.path.exists(output_workspace):
        os.mkdir(output_workspace)
    try:
        # Split the features by themselves: one output per "id" value.
        arcpy.Split_analysis(split_features, split_features, split_field,
                             output_workspace)
    except arcpy.ExecuteError:
        raise Exception("split process failed for {}".format(split_features))
    return output_workspace
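OUTPUT_DIR is a module-level constant not shown in this snippet; a hypothetical definition and call for context:

OUTPUT_DIR = r"C:\data\splits"  # hypothetical output root

# Split the parcels by themselves on "id", writing into the "id500" folder.
workspace = split_analysis(r"C:\data\parcels.shp", 500)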
Example No. 5
def main():
    try:
        _arrFC = []

        if arcpy.Exists(pathGDBTemp):
            arcpy.Delete_management(pathGDBTemp)

        arcpy.CreateFileGDB_management(pathTemp, nameGDB)

        _inDRENAJE = os.path.join(arcpy.env.workspace, 'ADREN_BRENA_EXT')
        _inMANZANA = os.path.join(arcpy.env.workspace, 'MZA_INEI_POB_C2017')

        arcpy.MakeFeatureLayer_management(_inDRENAJE, 'TEMP_DRENAJE')
        arcpy.MakeFeatureLayer_management(_inMANZANA, 'TEMP_MANZANA')

        arcpy.Split_analysis('TEMP_MANZANA', 'TEMP_DRENAJE', 'N_COD_GYZ',
                             pathGDBTemp)

        arcpy.env.workspace = pathGDBTemp

        datasets = arcpy.ListDatasets(feature_type='feature')
        datasets = [''] + datasets if datasets is not None else []

        # Add the MANZANA text field to every feature class
        for ds in datasets:
            for fc in arcpy.ListFeatureClasses(feature_dataset=ds):
                path = os.path.join(arcpy.env.workspace, ds, fc)
                print(arcpy.Describe(path).name)
                arcpy.AddField_management(path, "MANZANA", "TEXT", "", "", 10)
                _arrFC.append(path)

        # Update field MANZANA with the feature class name
        fields = ['MANZANA']
        for ds in datasets:
            for fc in arcpy.ListFeatureClasses(feature_dataset=ds):
                path = os.path.join(arcpy.env.workspace, ds, fc)
                with arcpy.da.UpdateCursor(path, fields) as cursor:
                    for row in cursor:
                        row[0] = arcpy.Describe(path).name
                        cursor.updateRow(row)

        arcpy.Merge_management(_arrFC, "MANZANAS_MERGE")

        for fc in _arrFC:
            arcpy.Delete_management(fc)

    except IOError as err:
        print("OS error: {}".format(err))
Example No. 6
def create_parks(state):
    in_features = os.path.normcase("D:/NDVI/" + state + "/parks/" + state +
                                   " Parks.shp")
    split_features = in_features
    split_field = "NAME"
    out_workspace = os.path.normcase("D:/NDVI/" + state + "/parks/" + state +
                                     " Parks")

    if not os.path.exists(out_workspace):
        os.mkdir(out_workspace)
    try:
        arcpy.Split_analysis(in_features, split_features, split_field,
                             out_workspace)
    except arcpy.ExecuteError:
        print("errors occurred in %s during the create-parks process" %
              state)
Example No. 7
def ExtractRasterByMultiPolygon(shpFile, fieldName, originRasterFile,
                                bufferSize, suffix, outPath):
    ## Set environment settings
    if not os.path.isdir(outPath):  ## if outPath does not exist, create it
        if outPath != "":
            os.mkdir(outPath)
    env.workspace = outPath
    ## Split polygons by fieldName
    polyNames, flag = ListFieldValues(shpFile, fieldName)
    if flag:
        arcpy.env.overwriteOutput = True
        ## Get the cellsize of originRasterFile
        cellSizeResult = arcpy.GetRasterProperties_management(
            originRasterFile, "CELLSIZEX")
        cellSize = cellSizeResult.getOutput(0)
        bufferDistance = float(cellSize) * bufferSize
        arcpy.Split_analysis(shpFile, shpFile, fieldName, outPath)
        polyFiles = []
        polyBufferFiles = []
        polyFinalFiles = []
        rasterFiles = []
        for name in polyNames:
            polyFile = outPath + os.sep + name + '.shp'
            polyBufferFile = outPath + os.sep + name + '_buf.shp'
            polyFinalFile = outPath + os.sep + name + '_final.shp'
            if suffix is None:
                rasterFile = outPath + os.sep + name + '.tif'
            else:
                rasterFile = outPath + os.sep + name + suffix + '.tif'
            polyFiles.append(polyFile)
            polyBufferFiles.append(polyBufferFile)
            rasterFiles.append(rasterFile)
            polyFinalFiles.append(polyFinalFile)
            arcpy.Buffer_analysis(polyFile, polyBufferFile, bufferDistance,
                                  "OUTSIDE_ONLY")
            arcpy.Merge_management([polyFile, polyBufferFile], polyFinalFile)

        if arcpy.CheckOutExtension("Spatial") == "CheckedOut":
            for i in range(0, len(polyBufferFiles)):
                tempRaster = arcpy.sa.ExtractByMask(originRasterFile,
                                                    polyFinalFiles[i])
                tempRaster.save(rasterFiles[i])
    else:
        print "The field %s does not exist in %s" % (fieldName, shpFile)
        return None
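ListFieldValues is a helper defined elsewhere in the source module; it evidently returns the values of fieldName plus a flag indicating whether the field exists, and env comes from "from arcpy import env". A usage sketch with hypothetical paths:

ExtractRasterByMultiPolygon(r"C:\data\basins.shp", "NAME", r"C:\data\dem.tif",
                            bufferSize=2, suffix="_dem",
                            outPath=r"C:\data\extracted")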
Example No. 8
    def create_parks(self):
        """
        Create a shapefile for each park in this state.
        :return:
        """
        in_features = os.path.normcase(
            os.path.join(self.state_path, self.state_name + " Parks.shp"))
        split_features = in_features
        split_field = "NAME"
        out_workspace = os.path.normcase(
            os.path.join(self.state_path, self.state_name + " Parks"))
        if not os.path.exists(out_workspace):
            os.mkdir(out_workspace)
        try:
            arcpy.Split_analysis(in_features, split_features, split_field,
                                 out_workspace)
        except arcpy.ExecuteError:
            print("errors occurred in %s during the create-parks process" %
                  self.state_name)
Example No. 9
# The highest/lowest z-scores (within the 1st/99th percentiles) were extracted by county and merged into a statewide data set.
# These data were intersected with land cover pixels for the analysis covered in the paper.

# Manually mask all pixels above 1000 meters in Washington State using "Con" and "Mask" raster tools.

# Convert raster to point for spatial autocorrelation
ex = [
    "Summer_AIR_Points", "Summer_BH_AIR_Points", "Spring_AIR_Points",
    "Spring_BH_AIR_Points", "Winter_AIR_Points", "Winter_BH_AIR_Points",
    "Autumn_AIR_Points", "Autumn_BH_AIR_Points"
]

for e in ex:
    # Split points by county
    arcpy.Split_analysis(
        "C:/Users/eric-/Desktop/MGST_Final/MGST_Final/MGST_Final.gdb/" + e,
        "C:/Users/eric-/Desktop/MGST_Final/MGST_Final/MGST_Final.gdb/WA_State_Boundary",
        "COUNTY", "C:/Users/eric-/Desktop/MGST_Final/MGST_Final/___split")

    print("Split for " + str(e) + " is done.")

    # Hot Spot (Getis-Ord Gi*)
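    # Note (not in the original script): os.listdir returns entries in
    # arbitrary order, so the fixed index arithmetic below assumes each
    # county shapefile contributes eight directory entries (.shp plus
    # sidecar files) in a stable layout.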
    for n in range(1, 40):
        myfile = os.listdir(
            "C:/Users/eric-/Desktop/MGST_Final/MGST_Final/___split")[5 + (
                (n - 1) * 8)]

        arcpy.stats.HotSpots(
            "C:/Users/eric-/Desktop/MGST_Final/MGST_Final/___split/" + myfile,
            "grid_code",
            "C:/Users/eric-/Desktop/MGST_Final/MGST_Final/__hotspot/" + myfile,
            "INVERSE_DISTANCE", "EUCLIDEAN_DISTANCE", "NONE", 160, "#", "#",
Example No. 10
# Name: splitbyfeatures.py
# Description: Split vegetation layer into separate feature classes for each climate zone
# Author: ESRI

# import system modules 
import arcpy 

# Set environment settings
arcpy.env.workspace = r'H:\ArcGIS\Default1.gdb'

# Split layer by boroughs, write to the Households dataset in Default1.gdb
splitData = r'H:\ArcGIS\Default1.gdb\Households\Households.shp'
splitFeatures = r'H:\ArcGIS\Default1.gdb\LondonBorough.shp'
splitField = "ctyua15nm"
outWorkspace = r'H:\ArcGIS\Default1.gdb\Households'
clusterTol = "1 Meters"
arcpy.Split_analysis(splitData, splitFeatures, splitField, outWorkspace, clusterTol)
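Split requires the split field to be a text field whose values are legal feature class names; a small pre-flight check (a sketch, reusing the variables above):

field_types = {f.name: f.type for f in arcpy.ListFields(splitFeatures)}
if field_types.get(splitField) != "String":
    raise ValueError("split field must be a text field")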
Example No. 11
    if field.name == 'tempid':
        try:
            deletefields.remove(field.name)
        except:
            pass
    if field.name == 'zone':
        try:
            deletefields.remove(field.name)
        except:
            pass

arcpy.DeleteField_management(zone, deletefields)

# Split so each feature becomes its own feature class in RAM (you need lots of ram for lots of features)
arcpy.AddMessage("Started splitting features...")
arcpy.Split_analysis(zone, zone, "tempid", mem)
arcpy.AddMessage("Done Splitting features.")

arcpy.RefreshCatalog(outfolder)

fcs = arcpy.ListFeatureClasses("*")

arcpy.AddMessage("Starting iteration.")
for fc in fcs:
    name = os.path.basename(fc)
    zstable = ZonalStatisticsAsTable(fc, infield, raster,
                                     os.path.join(mem, name + "zonal"))
    tatable = TabulateArea(fc, infield, raster, "Value",
                           os.path.join(mem, name + "areas"))
    arcpy.Delete_management(fc)
Example No. 12
import arcpy

arcpy.env.workspace = u'C:\\Assignment3\\'
arcpy.env.overwriteOutput = True
if not arcpy.Exists(u'districts.gdb'):
    arcpy.CreateFileGDB_management(arcpy.env.workspace, 'districts.gdb')
else:
    print "File geodatabase 'districts.gdb' already exists.\nExisting output may be overwritten"
arcpy.env.workspace = u'C:\\Assignment3\\districts.gdb'
arcpy.Split_analysis(u'C:\\Assignment3\\PublicSchools.shp',
                     u'C:\\Assignment3\\SchoolDistricts.shp', "SCHOOL_DIS",
                     arcpy.env.workspace)
Example No. 13
    out_feature_class="C:/SA_Fires/SHP/SA_Fire_Pts_SJ.shp",
    join_operation="JOIN_ONE_TO_ONE",
    join_type="KEEP_ALL",
    field_mapping='[*]',
    match_option="WITHIN")

# Method 2: Split & re-merge
arcpy.AddField_management(in_table="SA_Fire_Pts",
    field_name="Location",
    field_type="TEXT",
    field_length="20",
    field_is_nullable="NULLABLE",
    field_is_required="NON_REQUIRED")

arcpy.Split_analysis(in_features="SA_Fire_Pts",
    split_features="SouthAmerica",
    split_field="Name",
    out_workspace="C:SA_Fires")

arcpy.CalculateField_management(in_table="VENEZUELA",
    field="Location",
    expression='"Venezuela"',
    expression_type="PYTHON")

arcpy.CalculateField_management(in_table="ARGENTINA",
    field="Location",
    expression='"Argentina"',
    expression_type="PYTHON")

arcpy.CalculateField_management(in_table="BRAZIL",
    field="Location",
    expression='"Brazil"',
Example No. 14
    def identifySuitable(self):

        ### Preamble:

        # start_time = time.time()
        # print start_time
        # Check out the ArcGIS Spatial Analyst extension license
        arcpy.CheckOutExtension("Spatial")

        arcpy.env.overwriteOutput = True
        '''
        ############################################################################################################
        ## --------------------------------------- GET ALL INPUTS ----------------------------------------------- ##
        ############################################################################################################
        '''
        #####################
        ## USER SET INPUTS ##
        #####################

        #yourSpace = "R:\\users\\anagha.uppal\\MapRE\\MapRE_Data\\" ##^^ This is the directory path before the IRENA folder structure
        #defaultInputWorkspace = yourSpace + "INPUTS\\" ##^^ enter the path to your DEFAULT INPUT path

        ##########################
        ## SET FIXED PARAMETERS OR INPUTS ##
        ##########################

        arcpy.env.workspace = self.out_suitableSites_gdb

        ## FIXED PARAMETERS
        days = 365
        hours = 8760

        ### Other conditional clauses. Change as needed:
        ifTrue = 1
        ifFalse = 0

        ## BUFFER
        sideType = "FULL"
        endType = "ROUND"
        dissolveType = "ALL"

        selectIntermediate_geoUnits = "in_memory/selectIntermediate_geoUnits"

        ###############
        ## FUNCTIONS ##
        ###############
        def getFields(data):
            fieldList = []
            fields = arcpy.ListFields(data)
            for field in fields:
                fieldList.append(field.name)
            return fieldList

        '''
        #####################################################################################
        #### --------------------------------GEOPROCESSES--------------------------------####
        #####################################################################################
        '''
        '''
        ############################################
        ## Set environments and scratch workspace ##
        ############################################
        '''

        # set environments for raster analyses
        arcpy.env.extent = self.countryBounds
        arcpy.env.mask = self.countryBounds
        arcpy.env.snapRaster = self.templateRaster
        arcpy.env.cellSize = self.templateRaster

        ## INPUTS
        scriptpath = sys.path[0]
        toolpath = os.path.dirname(scriptpath)
        # tooldatapath = os.path.join(toolpath, "FOLDERNAME")
        # datapath = os.path.join(tooldatapath, "FILENAME.")

        ## SET SCRATCH WORKSPACES (AND CREATE SCRATCH.GDB IF IT DOESN'T EXIST)
        # scratchws = env.scratchWorkspace
        # scriptpath = sys.path[0]
        # toolpath = os.path.dirname(scriptpath)
        # if not env.scratchWorkspace:
        #    if not(os.path.exists(os.path.join(toolpath, "Scratch/scratch.gdb"))): # Create new fgdb if one does not already exist
        #        arcpy.AddMessage("Creating fgdb " + os.path.join(toolpath, "Scratch/scratch.gdb"))
        #        arcpy.CreateFileGDB_management(toolpath + "/Scratch", "scratch.gdb")
        #    scratchws = os.path.join(toolpath, "Scratch/scratch.gdb")
        #    arcpy.AddMessage("Set scratch workspace")
        arcpy.env.scratchWorkspace = self.scratch
        '''
        ##############
        ## Read CSV ##
        ##############
        '''
        with open(self.csvInput, "rt") as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            fields = next(reader)
            inputData = []
            for row in reader:
                inputData.append(dict(zip(fields, row)))

        ## inputDataPath is a dictionary of all the input datasets
        inputDataPath = {}

        ## populate the inputDataPath for each of the data categories.
        for dataCategory in fields:
            inputDataPath.update({dataCategory: [inputData[0][dataCategory], \
                                                 inputData[1][dataCategory], inputData[2][dataCategory]]})
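        ## Note (not in the original script): assumed CSV layout, inferred
        ## from the usage further below. For each data-category column,
        ## row 1 is a "yes"/"no" include flag, row 2 is the dataset path,
        ## and row 3 is the where-clause string passed to Con().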

        #    print dataCategory
        #    if not(inputData[0][dataCategory] == "no"):
        #        if (inputData[1][dataCategory] == "default"):
        #            inputDataPath[dataCategory] = defaultInputWorkspace + inputData[2][dataCategory] ##^^ enter local path for rail file.
        #        elif (inputData[1][dataCategory] == "country"):
        #            inputDataPath[dataCategory] = countryWorkspace + inputData[2][dataCategory] ##^^ enter local path for rail file.
        #        else: print dataCategory + "no data"
        #    print inputDataPath[dataCategory]

        ## Calculate the non-technology-specific conditional rasters for the
        ## data categories that may or may not have any datasets. If the data
        ## for a category does not exist, the conditional raster variable is
        ## assigned the scalar value 1.
        '''
        ########################
        ## Raster Calculation ##
        ########################
        '''
        ## initiate rasterSelection_constraints
        rasterSelection_constraints = 1

        ## CALCULATE CONSTRAINT-ONLY RASTER
        for constraint in inputDataPath:
            if inputDataPath[constraint][0] == "yes":
                rasterSelection = Con(inputDataPath[constraint][1], ifTrue, ifFalse, \
                                      str(inputDataPath[constraint][2]))
                rasterSelection_constraints = rasterSelection * rasterSelection_constraints
                arcpy.AddMessage("Finished raster calculation for " +
                                 constraint)

        ## LISTS TO HOLD THE AREAS AND WRITE TO CSV
        areaSumList = ['Area_km2']
        generationSumList = ['Generation_MWh']
        areaLabelList = ['Scenarios']
        subunitsList = ['Subregions']

        ## CREATE THRESHOLD SCENARIOS
        for threshold in self.thresholdList:
            resourceArea = Con(self.resourceInput, ifTrue, ifFalse,
                               "Value >= " + str(threshold))
            rasterSelection_final = rasterSelection_constraints * resourceArea
            arcpy.AddMessage(
                "Finished raster calculation for resource threshold: " +
                str(threshold))

            if self.countryBounds == "":
                outExtractByMask = rasterSelection_final
            else:
                outExtractByMask = ExtractByMask(rasterSelection_final,
                                                 self.countryBounds)

            thresholdStr = str(threshold)
            thresholdStr = thresholdStr.replace(".", "_")

            thresholdFileName = self.technology + "_" + thresholdStr
            outputFileName = os.path.join(self.out_suitableSites_gdb, \
                                          str(thresholdFileName) + "_" + self.fileNameSuffix)

            ## Raster to polygon conversion
            intermediate = arcpy.RasterToPolygon_conversion(
                outExtractByMask, "in_memory/intermediate", "NO_SIMPLIFY",
                "Value")
            ## Process: select gridcode = 1
            intermediateFields = getFields(intermediate)
            ## check the name of the "grid code" field in the polygon output.
            if "grid_code" in intermediateFields:
                selectIntermediate = arcpy.Select_analysis(
                    intermediate, "in_memory/selectIntermediate",
                    '"grid_code" = 1')

            if "gridcode" in intermediateFields:
                selectIntermediate = arcpy.Select_analysis(
                    intermediate, "in_memory/selectIntermediate",
                    '"gridcode" = 1')

            ## INTERSECT Geographic Unit of Analysis, if provided
            if arcpy.Exists(self.geoUnits):
                arcpy.AddMessage(
                    "Intersecting by geographic units of analysis")
                arcpy.Intersect_analysis([selectIntermediate, self.geoUnits],
                                         selectIntermediate_geoUnits, "NO_FID")
            else:
                selectIntermediate_geoUnits = selectIntermediate

            # Process: Add Field
            arcpy.AddField_management(selectIntermediate_geoUnits, "Area",
                                      "DOUBLE", "", "", "", "", "NULLABLE",
                                      "NON_REQUIRED", "")

            # Process: Calculate Field
            arcpy.CalculateField_management(selectIntermediate_geoUnits,
                                            "Area",
                                            "!Shape.Area@squarekilometers!",
                                            "PYTHON_9.3", "")

            # Process: select areas above minimum contiguous area and SAVE to file
            select = arcpy.Select_analysis(selectIntermediate_geoUnits, outputFileName, \
                                           '"Area" >= ' + str(self.minArea))

            if self.save_subunits_workspace != "":  ## save subunits
                arcpy.Split_analysis(select, self.geoUnits,
                                     self.geoUnits_attribute,
                                     self.save_subunits_workspace)

            if self.rasterOutput.lower() == 'true':  ##save the raster output
                out_resourceRaster = ExtractByMask(self.resourceInput, select)
                out_resourceRaster.save(outputFileName + "_resourceRaster")

            # get total area of potential:
            arcpy.AddMessage("Finished resource estimate for threshold: " +
                             str(threshold) + ", start calculating area")
            cursor = arcpy.SearchCursor(select)

            if self.geoUnits_attribute == "":
                areaList = []
                generationList = []
                for row in cursor:
                    area = row.getValue("Area")

                    generation = area * self.landUseEfficiency * self.avgCF * 8760 / 1000 * self.landUseDiscount

                    generationList.append(generation)
                    areaList.append(area)
                areaSumList.append(sum(areaList))
                generationSumList.append(sum(generationList))
                areaLabelList.append(
                    str(thresholdFileName) + "_" + self.fileNameSuffix)
                areaTable = [areaLabelList, areaSumList, generationSumList]
            else:
                areaList = []
                generationList = []
                geoUnits_attributeList = []
                areaNameDict = {}
                for row in cursor:
                    attribute = row.getValue(self.geoUnits_attribute)
                    area = row.getValue("Area")
                    if attribute not in areaNameDict:
                        areaNameDict[attribute] = area
                    else:
                        areaNameDict[attribute] += area

                geoUnits_attributeList = list(areaNameDict.keys())
                areaList = list(areaNameDict.values())
                for key in areaNameDict:
                    generation = areaNameDict[
                        key] * self.landUseEfficiency * self.avgCF * 8760 / 1000 * self.landUseDiscount
                    generationList.append(generation)

                #areaList.append(area)
                #geoUnits_attributeList.append(attribute)
                # geoattrUnique = list(set(geoUnits_attributeList))
                #
                # # initialise data of lists.
                # data = {'Geo Unit': geoUnits_attributeList,
                #         'Area': areaList,
                #         'Generation': generationList,
                #         }
                # # Create DataFrame
                # df = pd.DataFrame(data)
                # for value in geoattrUnique:
                #     areaSumList.append(df.loc[df['Geo Unit'] == value, 'Area'].sum())
                #     generationSumList.append(df.loc[df['Geo Unit'] == value, 'Generation'].sum())
                areaSumList = areaSumList + areaList
                generationSumList = generationSumList + generationList
                subunitsList = subunitsList + geoUnits_attributeList
                areaLabelList.append(
                    str(thresholdFileName) + "_" + self.fileNameSuffix)
                # areaTable = [areaLabelList, subunitsList, areaSumList, generationSumList]
                areaTable = [
                    areaLabelList, subunitsList, areaSumList, generationSumList
                ]

                #areaLabelList.append(str(thresholdFileName) + "_" + self.fileNameSuffix)
                #areaTable = [areaLabelList, geoattrUnique, areaSumList, generationSumList]
        '''
        #######################################
        ## Write area csv for all thresholds ##
        #######################################
        '''

        if arcpy.Exists(self.geoUnits):
            pass

        # Write Area Sums table as CSV file
        with open(self.csvAreaOutput + ".csv", 'w') as csvfile:
            writer = csv.writer(csvfile)
            for r in areaTable:
                writer.writerow(r)
Example No. 15
# Adding a temporary id field to zones
try:
    arcpy.AddField_management(zone, "tempid", "TEXT")
except arcpy.ExecuteError:
    arcpy.AddMessage(
        "Failed to add the field 'tempid' to the zone feature class. "
        "It might already exist. Continuing if it does...")
arcpy.CalculateField_management(zone, "tempid", '''"temp" + str(!OBJECTID!)''',
                                "PYTHON")

# Splitting zones into single polygon feature classes
mem = "in_memory"
arcpy.env.workspace = mem
arcpy.Split_analysis(zone, zone, "tempid", mem, "10 meters")
arcpy.AddMessage("Done splitting zones.")

# Listing feature classes and performing zonal stats on each individually
fclist = arcpy.ListFeatureClasses("*")
fcs = []
for fc in fclist:
    fcs.append(os.path.join(mem, fc))

for fc in fcs:
    name = os.path.splitext(os.path.basename(fc))[0]
    arcpy.sa.ZonalStatisticsAsTable(fc, idfield, raster,
                                    os.path.join(outfolder, tablesgdb, name))

# Merging tables
arcpy.RefreshCatalog(tablesgdb)
Example No. 16

# def deleteAll(path):
#     for i in os.listdir(path):
#         path_file=os.path.join(path,i)
#         if os.path.isfile(path_file):
#             os.remove(path_file)
#     print("Folder emptied successfully")

try:
    createFolder(outSplitPath)
    createFolder(outPointsPath)
    createFolder(outResultPath)
    arcpy.env.workspace = outSplitPath
    # 1. Split the imported polygons (overlapping polygons will cause problems)
    arcpy.Split_analysis(in_feature, in_feature, fieldName, outSplitPath)

    # If the polygons overlap, use this approach to split them instead
    # cursor = arcpy.SearchCursor(in_feature)
    # for row in cursor:
    #     name = row.getValue(fieldName)
    #     arcpy.AddMessage(name)
    #     outFeaturePoint = outSplitPath + "\\" + name + ".shp"
    #     where_clause = '"' + fieldName + '"' + '=' + '\'' + name + '\''
    #     arcpy.AddMessage("splitFeature" + name)
    #     arcpy.Select_analysis(in_feature, outFeaturePoint, where_clause)

    # 2. Iterate over the split polygons and export each one as vertex data

    featureclasses = arcpy.ListFeatureClasses()
    for featureclass in featureclasses:
Example No. 17
def shp_split(in_features, split_features, split_field, out_workplace):
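    """Thin convenience wrapper around arcpy.Split_analysis."""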
    arcpy.Split_analysis(in_features, split_features, split_field,
                         out_workplace)
Example No. 18
def clip():

    arcpy.env.workspace = nhd
    arcpy.RefreshCatalog(nhd)
    arcpy.ResetEnvironments()

    # Burnt and walled mosaiced elevation
    raster = burnt_ned

    # Create a feature dataset in NHD file geodatabase named "HUC8_Albers" in Albers projection
    workspace = arcpy.ListWorkspaces("*", "FileGDB")
    sr = arcpy.SpatialReference()
    sr.factoryCode = 102039
    sr.create()
    arcpy.env.outputCoordinateSystem = sr
    arcpy.env.compression = "None"
    arcpy.env.pyramid = "NONE"
    arcpy.CreateFeatureDataset_management(arcpy.env.workspace, "HUC8_Albers",
                                          sr)

    # HUC8 polygon selected automatically from input workspace
    inhuc8 = "WBD_HU8"
    inhuc8albers = "WBD_HU8_Albers"

    # Project WBD_HU8 to Albers
    srin = arcpy.SpatialReference()
    srin.factoryCode = 4269
    srin.create()

    arcpy.Project_management(inhuc8, "HUC8_Albers\WBD_HU8_Albers", sr, '',
                             srin)

    # Output goes to feature dataset HUC8_Albers
    outfd = "HUC8_Albers"

    # Splits HUC8 into individual feature classes for each polygon
    arcpy.AddField_management("WBD_HU8_Albers", "Label", "TEXT")
    arcpy.RefreshCatalog(nhd)
    calcexp = '"HUC" + !HUC_8!'
    arcpy.CalculateField_management("WBD_HU8_Albers", "Label", calcexp,
                                    "PYTHON")
    if not os.path.exists(os.path.join(outfolder, "cliptemp")):
        os.mkdir(os.path.join(outfolder, "cliptemp"))
    cliptemp = os.path.join(outfolder, "cliptemp")
    arcpy.FeatureClassToShapefile_conversion("WBD_HU8_Albers", cliptemp)
    wbdshp = os.path.join(cliptemp, "WBD_HU8_Albers.shp")
    arcpy.Split_analysis(wbdshp, wbdshp, "Label", outfd, '')
    shutil.rmtree(cliptemp)

    # Buffer HUC8 feature classes by 5000m
    fcs = arcpy.ListFeatureClasses("", "Polygon", "HUC8_Albers")
    for fc in fcs:
        arcpy.Buffer_analysis(fc, outfd + "\\" + fc + "_buffer", "5000 meters")

    arcpy.RefreshCatalog(nhd)
    arcpy.ResetEnvironments()

    # Clips rasters
    fcs = arcpy.ListFeatureClasses("*_buffer", "Polygon", "HUC8_Albers")
    for fc in fcs:
        arcpy.env.compression = "None"
        arcpy.env.pyramid = "NONE"
        fcshort = fc[3:11]
        arcpy.Clip_management(
            raster, '', outfolder + "\\" + "huc8clips" + nhdsubregion + "\\" +
            "NED" + fcshort + ".tif", fc, "0", "ClippingGeometry")

    return
Example No. 19
                          bufdist + ' Meters')

for bufdist in buf_dist:
    arcpy.Dissolve_management(int_r_buf + bufdist + '.shp',
                              int_r_buf + bufdist + '_dis.shp', 'segmid')
for bufdist in buf_dist:
    arcpy.AddField_management(int_r_buf + bufdist + '_dis.shp', 'segmidtext',
                              'TEXT')
for bufdist in buf_dist:
    arcpy.CalculateField_management(int_r_buf + bufdist + '_dis.shp',
                                    'segmidtext', '"t"+str(!segmid!)',
                                    'PYTHON_9.3')

#SPLIT ALL DELETE THIS CODE!!!! DNU DNU
arcpy.Split_analysis(
    "r90_dis", "r90_dis", "segmidtext",
    "V:/GIS/projects/streetview/tasks/201402_crashes_streetratings/data/processing/r90_split.gdb",
    "#")
arcpy.Split_analysis(
    "r60_dis", "r60_dis", "segmidtext",
    "V:/GIS/projects/streetview/tasks/201402_crashes_streetratings/data/processing/r60_split.gdb",
    "#")
arcpy.Split_analysis(
    "r30_dis", "r30_dis", "segmidtext",
    "V:/GIS/projects/streetview/tasks/201402_crashes_streetratings/data/processing/r30_split.gdb",
    "#")

# Intersect input, dissolve, erase source with dissolve as erase, split the intersect by features (split on itself), then run the erase fc and also all the split features
for bufdist in buf_dist:
    arcpy.Intersect_analysis(int_r_buf + bufdist + '_dis.shp',
                             int_r_buf + bufdist + '_dis_intself.shp', "ALL",
                             "#", "INPUT")
Example No. 20
for cls in dicClass:
    pathClass = pathOut + 'Class/'
    if not os.path.exists(pathClass):
        os.makedirs(pathClass)
    shpClass = pathClass + str(cls) + '.shp'
    where_clause = field + '= ' + str(cls)
    arcpy.Select_analysis(shpIn, shpClass, where_clause)

    arcpy.AddField_management(shpClass, "_ID", "TEXT")
    arcpy.CalculateField_management(shpClass, "_ID", "str(!FID!)", "PYTHON_9.3")

    pathCls = pathOut + 'Class_' + str(cls) + '/'
    if not os.path.exists(pathCls):
        os.makedirs(pathCls)
    arcpy.env.workspace = pathCls
    arcpy.Split_analysis(shpClass, shpClass, "_ID", pathCls)
    shpsCls = arcpy.ListFeatureClasses()
    num = 0
    pathClsBod = pathOut + 'ClsBod_' + str(cls) + '/'
    if not os.path.exists(pathClsBod):
        os.makedirs(pathClsBod)
    for shp in shpsCls:
        print shp
        shpBound = pathClsBod + "Bound_Cls" + str(num) + '.shp'
        arcpy.MinimumBoundingGeometry_management(shp, shpBound,
                                                 "RECTANGLE_BY_AREA", "NONE")
        rasOut = ExtractByMask(ras, shpBound)
        pathExtr = pathOut + str(cls) + '_Extr/'
        if not os.path.exists(pathExtr):
            os.makedirs(pathExtr)
        rasOut.save(pathExtr + str(num) + '.tif')
Example No. 21
def main(in_raster=None, areaOfInterest=None, saveTINs=False,
         out_workspace=None):

    if isinstance(saveTINs, str) and saveTINs.lower() == 'false':
        saveTINs = False
    if isinstance(saveTINs, str) and saveTINs.lower() == 'true':
        saveTINs = True

    rastName = os.path.splitext(os.path.split(in_raster)[1])[0]
    bathyRaster = Raster(in_raster)
    cellSize = bathyRaster.meanCellHeight

    with TempDir() as d:
        # Check if multipart polygon and convert to singlepart if true
        with arcpy.da.SearchCursor(areaOfInterest, ["SHAPE@"]) as cursor:
            for row in cursor:
                geometry = row[0]
                if geometry.isMultipart is True:
                    utils.msg("Converting multipart geometry to single parts...")
                    singlepart = os.path.join(d, 'singlepart.shp')
                    arcpy.MultipartToSinglepart_management(areaOfInterest,
                                                           singlepart)
                    arcpy.CopyFeatures_management(singlepart, areaOfInterest)

        # Name temporary files
        elevationTIN = os.path.join(d, 'elevationTIN')
        boundaryBuffer = os.path.join(d, 'bnd_buf.shp')
        boundaryRaster = os.path.join(d, 'bnd_rast.tif')
        boundaryPoints = os.path.join(d, 'bnd_pts.shp')
        pobfRaster = os.path.join(d, 'pobf_rast.tif')

        # Create elevation TIN
        utils.msg("Creating elevation TIN...")
        # just compute statistics
        utils.raster_properties(bathyRaster, attribute=None)
        zTolerance = abs((bathyRaster.maximum - bathyRaster.minimum)/10)
        arcpy.RasterTin_3d(bathyRaster, elevationTIN, str(zTolerance))
        arcpy.EditTin_3d(elevationTIN, ["#", "<None>", "<None>",
                                        "hardclip", "false"])

        # If more than one polygon in areaOfInterest,
        # split into separate files to process
        splitFiles = [areaOfInterest]
        multiple = False
        aoi_count = int(arcpy.GetCount_management(areaOfInterest).getOutput(0))
        if aoi_count > 1:
            multiple = True
            arcpy.AddField_management(areaOfInterest, "Name", "TEXT")
            splitFiles = []
            with arcpy.da.UpdateCursor(areaOfInterest,
                                       "Name") as cursor:
                for (i, row) in enumerate(cursor):
                    row[0] = "poly_{}".format(i)
                    splitFiles.append("in_memory/poly_{}".format(i))
                    cursor.updateRow(row)
            arcpy.Split_analysis(areaOfInterest, areaOfInterest,
                                 'Name', 'in_memory')

        # grab an output directory, we may need it if TINs are being saved
        if out_workspace is None or not os.path.exists(out_workspace):
            # get full path for aoi
            aoi_path = arcpy.Describe(areaOfInterest).catalogPath
            out_dir = os.path.split(aoi_path)[0]
        else:
            out_dir = out_workspace

        # Calculate ACR for each polygon
        pobfs = []
        num_polys = len(splitFiles)
        for (i, each) in enumerate(splitFiles, start=1):
            if num_polys == 1:
                acr_msg = "Calculating ACR Rugosity..."
            else:
                acr_msg = ("Calculating ACR Rugosity for Area "
                           "{} of {}...".format(i, num_polys))
            utils.msg(acr_msg)

            # Create POBF TIN
            arcpy.Buffer_analysis(each, boundaryBuffer,
                                  cellSize, "OUTSIDE_ONLY")
            arcpy.Clip_management(in_raster, '#', boundaryRaster,
                                  boundaryBuffer, '#',
                                  'ClippingGeometry', 'NO_MAINTAIN_EXTENT')
            arcpy.RasterToPoint_conversion(boundaryRaster,
                                           boundaryPoints, 'Value')
            arcpy.GlobalPolynomialInterpolation_ga(boundaryPoints, "grid_code",
                                                   "#", pobfRaster, cellSize)
            arcpy.CalculateStatistics_management(pobfRaster)
            if len(splitFiles) == 1:
                basename = '{}_planarTIN'.format(rastName)
            else:
                basename = '{}_planarTIN_{}'.format(rastName, i)
            pobf_temp = os.path.join(d, basename)
            pobf_perm = os.path.join(out_dir, basename)
            pobfs.append((pobf_temp, pobf_perm))

            zTolerance = abs((int(Raster(pobfRaster).maximum) -
                              int(Raster(pobfRaster).minimum))/10)
            arcpy.RasterTin_3d(pobfRaster, pobf_temp, str(zTolerance))
            arcpy.EditTin_3d(pobf_temp, ["#", "<None>", "<None>",
                                         "hardclip", "false"])
            # Calculate Rugosity
            arcpy.PolygonVolume_3d(elevationTIN, each, "<None>",
                                   "BELOW", "Volume1", "Surf_Area")
            arcpy.PolygonVolume_3d(pobf_temp, each, "<None>",
                                   "BELOW", "Volume2", "Plan_Area")
            arcpy.AddField_management(each, "Rugosity", "DOUBLE")
            arcpy.CalculateField_management(each, "Rugosity",
                                            "!Surf_Area! / !Plan_Area!",
                                            "PYTHON_9.3")
            arcpy.DeleteField_management(each, "Volume2;Volume1;Name")
            # Calculate Slope and Aspect
            arcpy.AddField_management(each, "Slope", "DOUBLE")
            arcpy.AddField_management(each, "Aspect", "DOUBLE")
            pobfXSize = Raster(pobfRaster).meanCellWidth
            pobfYSize = Raster(pobfRaster).meanCellHeight
            pobfArray = arcpy.RasterToNumPyArray(pobfRaster,
                                                 None, 3, 3)
            dz_dx = ((pobfArray[0, 2] + 2 * pobfArray[1, 2] +
                      pobfArray[2, 2]) -
                     (pobfArray[0, 0] + 2 * pobfArray[1, 0] +
                      pobfArray[2, 0])) / (8.0 * pobfXSize)
            dz_dy = ((pobfArray[2, 0] + 2 * pobfArray[2, 1] +
                      pobfArray[2, 2]) -
                     (pobfArray[0, 0] + 2 * pobfArray[0, 1] +
                      pobfArray[0, 2])) / (8.0 * pobfYSize)
            raw_aspect = (180 / np.pi) * np.arctan2(dz_dy, -dz_dx)
            if np.equal(dz_dy, dz_dx) and np.equal(dz_dy, 0):
                aspect = -1
            else:
                if np.equal(raw_aspect, 0):
                    aspect = 90
                elif np.equal(raw_aspect, 90):
                    aspect = 0
                elif raw_aspect > 90:
                    aspect = 360.0 - raw_aspect + 90
                else:
                    aspect = 90.0 - raw_aspect
            with arcpy.da.UpdateCursor(each, ["Slope", "Aspect"]) as cursor:
                for rows in cursor:
                    rows[0] = np.arctan(np.sqrt(dz_dx**2 +
                                                dz_dy**2))*(180/np.pi)
                    rows[1] = aspect
                    cursor.updateRow(rows)

        # Merge split files and save to input file location
        if multiple:
            arcpy.Merge_management(splitFiles, areaOfInterest)

        # Save TINs if requested
        if saveTINs:
            utils.msg("Saving elevation and planar TINs to "
                      "{}...".format(out_dir))
            arcpy.CopyTin_3d(elevationTIN,
                             os.path.join(out_dir,
                                          '{}_elevationTIN'.format(rastName)))

            for (pobf_temp, pobf_perm) in pobfs:
                arcpy.CopyTin_3d(pobf_temp, pobf_perm)
Example No. 22
print('Field added')

#We will simply copy the field ID to the split ID field.
print('Calculate field')
arcpy.CalculateField_management(MergedIsobasins, "splitid", "!FID!", "PYTHON3",
                                None)
print('Field calculated')

#The split id field is used to split the isobasins to create one shapefile per basin.
featuretobesplit = MergedIsobasins
splitFeatures = MergedIsobasins
splitField = 'splitid'
outWorkspace = 'D:/WilliamLidberg/FeatureExtraction/SplittedFeatures/SplitIsobasins/'
clusterTol = "1 Meters"
print('Splitting isobasin shapefile')
arcpy.Split_analysis(featuretobesplit, splitFeatures, splitField, outWorkspace,
                     clusterTol)
print('Splitting complete')

# Buffer the split isobasins by 1 km to avoid potential edge effects
SplittedBasin = 'D:/WilliamLidberg/FeatureExtraction/SplittedFeatures/SplitIsobasins/'
BufferedBasin = 'D:/WilliamLidberg/FeatureExtraction/SplittedFeatures/BufferedBasins/'
Distance = '1000 Meters'
arcpy.env.workspace = 'D:/WilliamLidberg/FeatureExtraction/SplittedFeatures/SplitIsobasins'
# List the feature classes in the folder with the split isobasins
fcList = arcpy.ListFeatureClasses()

# River and lake polygons will be converted to raster at a later stage.
# That requires a field with a value.
# Add an empty field named 'value'.
Lakepolygons = 'D:/WilliamLidberg/Data/GIS/Fastighetskartan_vektor/riks/riks/water.shp'
print('Adding new numeric field to River/Lake shapefile')
arcpy.AddField_management(Lakepolygons, 'value', 'DOUBLE')
Example No. 23
arcpy.CalculateField_management(area_accessible_stands, "AREA", "[ZWISCHEN]&[AREA]", "VB", "")

# Process: Delete Field
arcpy.DeleteField_management(Output_Feature_Class__5_, "zwischen")

# Process: Select Layer By Location
arcpy.SelectLayerByLocation_management(inventory_layer_Layer, "INTERSECT", operable_areas, "", "NEW_SELECTION")

# Process: Copy Features
arcpy.CopyFeatures_management(inventory_layer_Layer__2_, inventory_layer_CopyFeatures__6_, "", "0", "0", "0")

# Process: Spatial Join
arcpy.SpatialJoin_analysis(inventory_layer_CopyFeatures__6_, operable_areas, inventory_layer_CopyFeaturesspasf, "JOIN_ONE_TO_ONE", "KEEP_ALL", "STRATUM \"STRATUM\" true true false 2 Short 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,STRATUM,-1,-1;ACRES \"ACRES\" true true false 8 Double 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,ACRES,-1,-1;Date_Typed \"Date_Typed\" true true false 50 Text 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,Date_Typed,-1,-1;Density_Ca \"Density_Ca\" true true false 50 Text 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,Density_Ca,-1,-1;Veg_Call \"Veg_Call\" true true false 50 Text 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,Veg_Call,-1,-1;Size_Call \"Size_Call\" true true false 50 Text 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,Size_Call,-1,-1;Veg_Code \"Veg_Code\" true true false 6 Text 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,Veg_Code,-1,-1;Shape_Leng \"Shape_Leng\" true true false 8 Double 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,Shape_Leng,-1,-1;Gross_CF \"Gross_CF\" true true false 8 Double 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,Gross_CF,-1,-1;Net_BF \"Net_BF\" true true false 8 Double 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,Net_BF,-1,-1;Tons_Acre \"Tons_Acre\" true true false 8 Double 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,Tons_Acre,-1,-1;Stand_Tons \"Stand_Tons\" true true false 8 Double 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,Stand_Tons,-1,-1;Stand_CF \"Stand_CF\" true true false 8 Double 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,Stand_CF,-1,-1;Stand_BF \"Stand_BF\" true true false 8 Double 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,Stand_BF,-1,-1;Shape_Length \"Shape_Length\" false true true 8 Double 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,Shape_Length,-1,-1;Shape_Area \"Shape_Area\" false true true 8 Double 0 0 ,First,#,in_memory\\inventory_layer_CopyFeatures,Shape_Area,-1,-1", "INTERSECT", "", "")

# Process: Split
arcpy.Split_analysis(inventory_layer_CopyFeaturesspasf, operable_areas, "AREA", in_memory__2_, "")

# Process: Copy Features (3)
arcpy.CopyFeatures_management(Access_Points_to_Operable_Areas, access_points__2_, "", "0", "0", "0")

# Process: Add Field (3)
arcpy.AddField_management(access_points__2_, "zwischen", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")

# Process: Calculate Field (9)
arcpy.CalculateField_management(Output_Feature_Class__6_, "zwischen", "'area'", "PYTHON", "")

# Process: Calculate Field (10)
arcpy.CalculateField_management(Output_Feature_Class__7_, "AREA", "[ZWISCHEN]&[AREA]", "VB", "")

# Process: Delete Field (2)
arcpy.DeleteField_management(Output_Feature_Class__8_, "zwischen")
Example No. 24
# ChompOverlaps.py
# Removes overlaps by letting the first polygon chomp its overlapping area out of the second.

import arcpy, os
arcpy.env.overwriteOutput = True
inpoly = arcpy.GetParameterAsText(0)
field = arcpy.GetParameterAsText(1)
outfolder = arcpy.GetParameterAsText(2)
try:
    arcpy.CreateFileGDB_management(outfolder, "ChompOverlaps")
except:
    pass
scratch = os.path.join(outfolder, "ChompOverlaps.gdb")
arcpy.CopyFeatures_management(inpoly, os.path.join(scratch, "inpoly"))
scratchpoly = os.path.join(scratch, "inpoly")
mem = "in_memory"
arcpy.env.workspace = mem
arcpy.Split_analysis(inpoly, inpoly, field, mem, '1')
fcs = []
for root, dirs, files in arcpy.da.Walk(mem):
    for file in files:
        fcs.append(os.path.join(root, file))

for fc in fcs:
    arcpy.Erase_analysis(scratchpoly, fc, os.path.join(scratch, "outpoly"))
        
Example No. 25
arcpy.SelectLayerByLocation_management(junction_lyr, "INTERSECT", tenha, '',
                                       "NEW_SELECTION")
arcpy.CopyFeatures_management(junction_lyr,
                              os.path.join(outfolder, "tenhajunction.shp"))
tenhajunction = os.path.join(outfolder, "tenhajunction.shp")

# Split lakes.
arcpy.AddField_management(fourha, "ID", "TEXT")
arcpy.CalculateField_management(fourha, "ID", '''"%s" % (!FID!)''', "PYTHON")
arcpy.AddField_management(all4ha, "ID", "TEXT")
arcpy.CalculateField_management(all4ha, "ID", '''"%s" % (!FID!)''', "PYTHON")
if not os.path.exists(os.path.join(outfolder, "lakes")):
    os.mkdir(os.path.join(outfolder, "lakes"))

lakes = os.path.join(outfolder, "lakes")
arcpy.Split_analysis(all4ha, all4ha, "ID", lakes)

# Iterate tracing.
arcpy.env.workspace = lakes
arcpy.MakeFeatureLayer_management(watersheds,
                                  os.path.join(outfolder, "watersheds.lyr"))
watersheds_lyr = os.path.join(outfolder, "watersheds.lyr")
fcs = arcpy.ListFeatureClasses()
arcpy.MakeFeatureLayer_management(
    fourhajunction, os.path.join(outfolder, "fourhajunction.lyr"))
fourhajunction_lyr = os.path.join(outfolder, "fourhajunction.lyr")

# Create folder for final output
if not os.path.exists(os.path.join(outfolder, "IWS")):
    os.mkdir(os.path.join(outfolder, "IWS"))
Example No. 26
def cumulative_watersheds(nhd, watersheds, topoutfolder, filterlakes):

    # Naming Convention
    subregion_number = os.path.basename(nhd)
    subregion = subregion_number[4:8]
    outfolder = os.path.join(topoutfolder, subregion + "CWS")
    if not os.path.exists(outfolder):
        os.mkdir(outfolder)

    # Projections:
    nad83 = arcpy.SpatialReference(4269)
    albers = arcpy.SpatialReference(102039)

    # NHD variables:
    flowline = os.path.join(nhd, "Hydrography", "NHDFlowline")
    waterbody = os.path.join(nhd, "Hydrography", "NHDWaterbody")
    network = os.path.join(nhd, "Hydrography", "HYDRO_NET")
    junction = os.path.join(nhd, "Hydrography", "HYDRO_NET_Junctions")
    arcpy.env.extent = waterbody

    # Make shapefiles for one hectare and ten hectare lakes that intersect flowlines.
    arcpy.FeatureClassToShapefile_conversion(waterbody, outfolder)
    waterbodyshp = os.path.join(outfolder, "NHDWaterbody.shp")
    waterbody_lyr = os.path.join(outfolder, "waterbody.lyr")
    arcpy.MakeFeatureLayer_management(waterbodyshp, waterbody_lyr)
    arcpy.SelectLayerByAttribute_management(waterbody_lyr, "NEW_SELECTION",
                                            '''"AreaSqKm">=0.04''')

    fcodes = (39000, 39004, 39009, 39010, 39011, 39012, 43600, 43613, 43615,
              43617, 43618, 43619, 43621)
    whereClause = '''("AreaSqKm" >= 0.04 AND "FCode" IN %s) OR ("FCode" = 43601 AND "AreaSqKm" >= 0.1)''' % (
        fcodes, )
    ##    whereClause = '''"AreaSqKm" >=0.04 AND ("FCode" = 39000 OR "FCode" = 39004 OR\
    ##    "FCode" = 39009 OR "FCode" = 39010 OR "FCode" = 39011 OR "FCode" = 39012 OR "FCode" = 43600 OR "FCode" = 43613 OR\
    ##    "FCode" = 43615 OR "FCode" = 43617 OR "FCode" = 43618 OR "FCode" = 43619 OR "FCode" = 43621 OR ("FCode" = 43601 AND "AreaSqKm" >=0.1 ))'''
    arcpy.SelectLayerByAttribute_management(waterbody_lyr, "SUBSET_SELECTION",
                                            whereClause)

    all4ha = os.path.join(outfolder, "all4ha.shp")
    arcpy.CopyFeatures_management(waterbody_lyr, all4ha)

    arcpy.SelectLayerByLocation_management(waterbody_lyr, "INTERSECT",
                                           flowline, "", "SUBSET_SELECTION")

    try:
        filtershp = os.path.join(outfolder, "filter.shp")
        arcpy.Project_management(filterlakes, filtershp, nad83, '', albers)
        arcpy.SelectLayerByLocation_management(waterbody_lyr, "INTERSECT",
                                               filtershp, '',
                                               "SUBSET_SELECTION")

    except:
        pass

    fourha = os.path.join(outfolder, "fourha.shp")
    arcpy.CopyFeatures_management(waterbody_lyr, fourha)

    fourha_lyr = os.path.join(outfolder, "fourha.lyr")
    arcpy.MakeFeatureLayer_management(fourha, fourha_lyr)

    # Make shapefiles of junctions that intersect one hectare and ten hectare lakes.
    junction_lyr = os.path.join(outfolder, "junction.lyr")
    arcpy.MakeFeatureLayer_management(junction, junction_lyr)

    arcpy.SelectLayerByLocation_management(junction_lyr, "INTERSECT", fourha,
                                           '', "NEW_SELECTION")

    fourhajunction = os.path.join(outfolder, "fourhajunction.shp")
    arcpy.CopyFeatures_management(junction_lyr, fourhajunction)

    # Split lakes.
    arcpy.AddField_management(fourha, "ID", "TEXT")
    arcpy.CalculateField_management(fourha, "ID", '''"%s" % (!FID!)''',
                                    "PYTHON")
    arcpy.AddField_management(all4ha, "ID", "TEXT")
    arcpy.CalculateField_management(all4ha, "ID", '''"%s" % (!FID!)''',
                                    "PYTHON")

    lakes = os.path.join(outfolder, "lakes")
    if not os.path.exists(lakes):
        os.mkdir(lakes)

    arcpy.Split_analysis(all4ha, all4ha, "ID", lakes)

    # Iterate tracing.
    arcpy.env.workspace = lakes
    watersheds_lyr = os.path.join(outfolder, "watersheds.lyr")
    arcpy.MakeFeatureLayer_management(watersheds, watersheds_lyr)

    fcs = arcpy.ListFeatureClasses()
    fourhajunction_lyr = os.path.join(outfolder, "fourhajunction.lyr")
    arcpy.MakeFeatureLayer_management(fourhajunction, fourhajunction_lyr)

    # Create folder for final output
    cws = os.path.join(outfolder, "CWS")
    if not os.path.exists(cws):
        os.mkdir(cws)

    arcpy.AddMessage("Starting iteration.")

    for fc in fcs:

        arcpy.RefreshCatalog(outfolder)
        name = os.path.splitext(fc)[0]
        arcpy.AddMessage("Processing " + name + ".")
        # Sets the output to in memory:
        lakes = "in_memory"
        # Repair the lake geometry if needed.
        arcpy.RepairGeometry_management(fc)
        # Make sure the lake's own watershed gets added (merged) back in to the final aggregated watershed:
        # Make a centroid for the lake, then intersect it with watersheds, then merge it with the previous sheds made above.
        center = os.path.join(lakes, "center" + name)
        arcpy.FeatureToPoint_management(fc, center, "INSIDE")

        arcpy.SelectLayerByLocation_management(watersheds_lyr, "INTERSECT",
                                               center, '', "NEW_SELECTION")
        ownshed = os.path.join(lakes, "ownshed" + name)
        arcpy.CopyFeatures_management(watersheds_lyr, ownshed)

        # Select 4 hectare lake junctions that do intersect it.
        arcpy.SelectLayerByLocation_management(fourhajunction_lyr, "INTERSECT",
                                               fc, '', "NEW_SELECTION")
        # Copy junctions
        lakejunction = os.path.join(lakes, "junct" + name)
        arcpy.CopyFeatures_management(fourhajunction_lyr, lakejunction)

        try:
            # Trace the network upstream from the junctions from above.
            arcpy.TraceGeometricNetwork_management(
                network, os.path.join(lakes, "im" + name + "tracelyr"),
                lakejunction, "TRACE_UPSTREAM")
            trace = os.path.join(lakes, "im" + name + "tracelyr",
                                 "NHDFlowline")

            # Write the trace
            traceshp = os.path.join(lakes, "im" + name + "trace")
            arcpy.CopyFeatures_management(trace, traceshp)

            # Make a layer from the trace
            tracesel = os.path.join(lakes, "im" + name + "tracesellyr")
            arcpy.MakeFeatureLayer_management(traceshp, tracesel)

            # Select from the trace lines those that don't have their midpoint in the lake
            arcpy.SelectLayerByLocation_management(tracesel,
                                                   "HAVE_THEIR_CENTER_IN", fc,
                                                   '', "NEW_SELECTION")
            arcpy.SelectLayerByLocation_management(tracesel,
                                                   "HAVE_THEIR_CENTER_IN", fc,
                                                   '', "SWITCH_SELECTION")
            # Select watersheds that intersect the trace
            arcpy.SelectLayerByLocation_management(watersheds_lyr, "INTERSECT",
                                                   tracesel, '',
                                                   "NEW_SELECTION")
            sheds = os.path.join(lakes, "im" + name + "sheds")
            arcpy.CopyFeatures_management(watersheds_lyr, sheds)

            sheds_lyr = os.path.join(lakes, "im" + name + "shedslyr")
            arcpy.MakeFeatureLayer_management(sheds, sheds_lyr)

        except:
            arcpy.AddMessage("Isolated shed.")

        sheds3 = os.path.join(lakes, "sheds3" + name)
        try:
            arcpy.Merge_management([sheds, ownshed], sheds3)

        except:
            arcpy.CopyFeatures_management(ownshed, sheds3)

        # Dissolve the aggregate watershed if it has more than one polygon
        polynumber = int(arcpy.GetCount_management(sheds3).getOutput(0))
        pre = os.path.join(lakes, "pre" + name)
        if polynumber > 1:
            arcpy.AddField_management(sheds3, "Dissolve", "TEXT")
            arcpy.CalculateField_management(sheds3, "Dissolve", "1", "PYTHON")
            arcpy.Dissolve_management(sheds3, pre)
        elif polynumber < 2:
            arcpy.CopyFeatures_management(sheds3, pre)

        # Get the permanent id from the feature and add it to output shed
        field = "Permanent_"
        cursor = arcpy.SearchCursor(fc)
        for row in cursor:
            id = row.getValue(field)
        arcpy.AddField_management(pre, "NHD_ID", "TEXT")
        arcpy.CalculateField_management(pre, "NHD_ID", '"{0}"'.format(id),
                                        "PYTHON")

        # Erase the lakes own geometry from its watershed
        arcpy.Erase_analysis(pre, fc, os.path.join(cws, "CWS" + name + ".shp"))

        # Delete intermediate in_memory fcs and variables
        temp_items = [
            lakejunction, trace, traceshp, tracesel, sheds, sheds_lyr, center,
            sheds3, pre, fc, ownshed
        ]
        cu.cleanup(temp_items)
Example No. 27
    else:
        arcpy.env.workspace = gdb  #--change working directory to each GDB in list
        #arcpy.env.outputCoordinateSystem = arcpy.SpatialReference(r"C:\Data\WWF\Processing\WWF_5min_grid_Moll.prj")
        fclist = arcpy.ListFeatureClasses()
        for fc in fclist:
            for i in FieldNameList:
                if "2004METT" in str(fc):
                    field = FieldNameList[0]
                    cursor = arcpy.SearchCursor(fc)
                    for row in cursor:
                        t = row.getValue(field)
                        if str(t) == 'yes':
                            arcpy.env.overwriteOutput = False
                            arcpy.Split_analysis(fc,
                                                 fc,
                                                 i,
                                                 gdb,
                                                 cluster_tolerance="#")
                            print "splitting " + "%s" % fc + "by " + "%s" % i

# Delete all empty feature classes

#env.workspace = r"C:\Temp\Test.gdb"

listFCs = arcpy.ListFeatureClasses("*")

for fc in listFCs:
    count = int(arcpy.GetCount_management(fc).getOutput(0))
    if count == 0:
        arcpy.Delete_management(fc)
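
Because arcpy.env.overwriteOutput is disabled above, a re-run of Split_analysis fails as soon as one of its outputs already exists: each output feature class is named after a value of the split field. A hedged sketch of a pre-check that skips a split whose outputs are already in the target geodatabase (the helper name is an assumption):

# Sketch: skip a split when outputs from a previous run already exist.
# Split_analysis names each output after a value of the split field.
import arcpy

def split_if_new(in_fc, split_fc, split_field, out_gdb):
    values = set(str(row[0]) for row in
                 arcpy.da.SearchCursor(split_fc, [split_field]))
    arcpy.env.workspace = out_gdb
    existing = set(arcpy.ListFeatureClasses() or [])
    if values & existing:
        print("skipping %s: outputs already exist" % in_fc)
        return
    arcpy.Split_analysis(in_fc, split_fc, split_field, out_gdb)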
Esempio n. 28
0
  outWorkspace = arcpy.GetParameterAsText(0)  # assumption: the split target workspace arrives as parameter 0; the snippet uses outWorkspace below but its definition was cut off
  mydata = arcpy.GetParameterAsText(1)
  myboundary = arcpy.GetParameterAsText(2)
  myresult = arcpy.GetParameterAsText(3)
  inValueRaster = arcpy.GetParameterAsText(4)
  splitField = "CODE"
  zoneField = "DLBM"
  
  # liu
  arcpy.AddField_management(myboundary, "Area", "DOUBLE")
  arcpy.CalculateField_management(myboundary, "Area", "!shape.area@SQUAREKILOMETERS!", "PYTHON_9.3")
  arcpy.AddField_management(myresult,"Area_sum","DOUBLE")
  


  # Split mydata by the values of splitField ("CODE") taken from myboundary
  arcpy.Split_analysis(mydata, myboundary, splitField, outWorkspace)
  # Add the result fields (do not re-run this block after the fields exist)
  classes = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '20']
  for i in classes:
    arcpy.AddField_management(myresult, "s" + i, "DOUBLE")
    arcpy.AddField_management(myresult, "a" + i, "DOUBLE")
    arcpy.AddField_management(myresult, "areaperc" + i, "DOUBLE")

  # Build the list of split outputs to loop over (excluding the source DLTB and boundary layers)
  datasets = list(set(arcpy.ListFeatureClasses("*shp*"))-set(arcpy.ListFeatureClasses("*DLTB*"))-set(arcpy.ListFeatureClasses("*boundry*")))

  # Loop over each split output and compute the light index and area
  for element in datasets:
      update_dlbm(element)
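
update_dlbm is defined elsewhere in the original script. As a hedged illustration only, a helper with that name might tally area per DLBM land-use class for one split output roughly like this (everything in the body is an assumption except the zoneField variable set above):

# Hypothetical sketch of update_dlbm - the original helper is not shown.
# Sums polygon area per DLBM class code for one split output; writing the
# sums back into myresult's "aNN" fields is left out.
def update_dlbm(element):
    area_by_class = {}
    with arcpy.da.SearchCursor(element, [zoneField, "SHAPE@AREA"]) as cursor:
        for dlbm, area in cursor:
            code = str(dlbm)[:2]  # class codes like '01' ... '20'
            # assumes a projected CRS in metres, so /1e6 gives square km
            area_by_class[code] = area_by_class.get(code, 0.0) + area / 1e6
    print(element, area_by_class)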
Esempio n. 29
0
import arcpy


if __name__ == '__main__':
    # Split the fishnet shapefile by itself: Split_analysis writes one
    # shapefile per unique value of the "idj" field into the target folder
    China = "D:/DaTa/Yangtz/workflow/shape_ehi/fishnet/3.shp"
    in_polygon = "D:/DaTa/Yangtz/workflow/shape_ehi/fishnet/3.shp"

    arcpy.env.overwriteOutput = True

    arcpy.Split_analysis(China, in_polygon, "idj", "D:/DaTa/Yangtz/workflow/shape_ehi/sp_/3/")
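
Split_analysis does not create the target workspace; the folder must already exist. A minimal guard, reusing the paths above:

# Sketch: create the target folder before splitting, since Split_analysis
# expects an existing workspace.
import os

out_dir = "D:/DaTa/Yangtz/workflow/shape_ehi/sp_/3/"
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
arcpy.Split_analysis(China, in_polygon, "idj", out_dir)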
Esempio n. 30
0
    def execute(self, parameters, messages):
        """The source code of the tool."""

        try:

            # Clear memory JIC
            deleteInMemory()

            # Get the analysis ID
            analysis_id = (parameters[34].valueAsText.split("[")[1][:5])+"_"+\
                          (parameters[33].valueAsText.split("[")[1][:3])+"_"+\
                          (parameters[32].valueAsText)

            # Get the alternative selection
            alternative = parameters[34].valueAsText.split("[")[1][:5]

            # Make a directory
            parent_folder_path = os.path.join(
                os.path.dirname(parameters[1].valueAsText),
                os.path.basename(parameters[1].valueAsText))
            child_folder_path = parent_folder_path + "\\" + analysis_id

            # JIC
            if not os.path.exists(parent_folder_path):
                os.mkdir(parent_folder_path)

            if not os.path.exists(child_folder_path):
                os.mkdir(child_folder_path)

            date_time_stamp = re.sub('[^0-9]', '',
                                     str(datetime.datetime.now())[5:16])
            #filename = os.path.basename(__file__)
            analysis_id_time_stamp = analysis_id + "_" + date_time_stamp

            # Create the logger
            report_path = child_folder_path + "\\" + analysis_id_time_stamp + "_Report.txt"
            logfile_path = child_folder_path + "\\" + analysis_id_time_stamp + "_Logfile.txt"
            logger = pyt_log(report_path, logfile_path)

            if not parameters[35].value:
                logger.log_active = False

            # Start logging
            logger.log_all("Surface Use Analysis " +
                           str(datetime.datetime.now()))
            logger.log_report("_" * 120 + "\n")
            logger.log_all("Running environment: Python - {}\n".format(
                sys.version))
            logger.log_all("User: "******"\n")
            logger.log_all("Analysis Type: " + analysis_id + "\n")
            logger.log_all("Analysis Area:\n")
            logger.log_all('\t' + parameters[0].valueAsText + '\n')
            logger.log_all("Output Location:\n")
            logger.log_all('\t' + parameters[1].valueAsText + '\n')

            ###################################################################################################
            ##
            ## MAIN PROGRAM
            ##
            ###################################################################################################

            # Make a geodatabase
            database_name = analysis_id_time_stamp + '.gdb'
            database_path = child_folder_path
            arcpy.CreateFileGDB_management(database_path, database_name,
                                           "10.0")
            output_path = database_path + "\\" + database_name
            logger.log_all('Created geodatabase at: \n')
            logger.log_all('\t' + output_path + "\n")

            # Secure a copy of the input analysis area
            arcpy.MakeFeatureLayer_management(parameters[0].value,
                                              "in_memory\\_")

            # Dissolve everything to prevent overlapping input polygons
            logger.console('1.) Dissolving input polygon')
            arcpy.Dissolve_management("in_memory\\_", "in_memory\\__")
            analysis_area = output_path + "\\" + analysis_id + "_Analysis_Area"
            arcpy.CopyFeatures_management("in_memory\\__", analysis_area)

            # Set the workspace to the output database
            arcpy.env.workspace = output_path
            logger.logfile("Env workspace:", output_path)

            # Identify spatial reference of analysis area
            spatial_ref = arcpy.Describe(analysis_area).spatialReference
            logger.logfile("Spatial reference:", str(spatial_ref))

            # The main data structure
            # key = parameter ID
            # values = ['input parameter paths', 'category', 'Code']
            input_params = {
                '02air_quality_climate':
                [parameters[2].valueAsText, 'Resources', 'AIR_QUAL'],
                '03aquatic_wildlife':
                [parameters[3].valueAsText, 'Resources', 'AQUAT_WL'],
                '04cultural_resources':
                [parameters[4].valueAsText, 'Resources', 'CULTURAL'],
                '05fire_fuel':
                [parameters[5].valueAsText, 'Resources', 'FIRE_FUELS'],
                '06geology': [parameters[6].valueAsText, 'Resources', 'GEO'],
                '07wilderness_characteristics':
                [parameters[7].valueAsText, 'Resources', 'LWC'],
                '08paleo_resources':
                [parameters[8].valueAsText, 'Resources', 'PALEO'],
                '09soil_resources':
                [parameters[9].valueAsText, 'Resources', 'SOIL'],
                '10special_status_species':
                [parameters[10].valueAsText, 'Resources', 'SS_SPECIES'],
                '11terrestrial_wildlife':
                [parameters[11].valueAsText, 'Resources', 'TERR_WL'],
                '12tribal_concerns':
                [parameters[12].valueAsText, 'Resources', 'TRIBAL'],
                '13vegetation':
                [parameters[13].valueAsText, 'Resources', 'VEG'],
                '14visual_resources':
                [parameters[14].valueAsText, 'Resources', 'VISUAL'],
                '15water_resources': [
                    parameters[15].valueAsText, 'Resources', 'WATER'
                ],
                '16wetlands_riparian': [
                    parameters[16].valueAsText, 'Resources', 'WETLANDS'
                ],
                '17forestry': [
                    parameters[17].valueAsText, 'Resource_Uses', 'FORESTRY'
                ],
                '18livestock_grazing': [
                    parameters[18].valueAsText, 'Resource_Uses', 'GRAZING'
                ],
                '19lands_realty': [
                    parameters[19].valueAsText, 'Resource_Uses', 'LANDS'
                ],
                '20minerals': [
                    parameters[20].valueAsText, 'Resource_Uses', 'MINERALS'
                ],
                '21recreation': [
                    parameters[21].valueAsText, 'Resource_Uses', 'REC'
                ],
                '22renewable_energy': [
                    parameters[22].valueAsText, 'Resource_Uses', 'RENEWABLE'
                ],
                '23south_park_MLP': [
                    parameters[23].valueAsText, 'Resource_Uses', 'SPMLP'
                ],
                '24travel_transportation': [
                    parameters[24].valueAsText, 'Resource_Uses', 'TRAVEL'
                ],
                '25ACECs': [
                    parameters[25].valueAsText, 'Special_Designations', 'ACEC'
                ],
                '26BCAs': [
                    parameters[26].valueAsText, 'Special_Designations', 'BCA'
                ],
                '27scenic_byways':
                [parameters[27].valueAsText, 'Special_Designations', 'BYWAYS'],
                '28wilderness_areas_WSAs': [
                    parameters[28].valueAsText, 'Special_Designations', 'WSA'
                ],
                '29wild_scenic_rivers': [
                    parameters[29].valueAsText, 'Special_Designations', 'WSR'
                ],
                '30aml_hazmat':
                [parameters[30].valueAsText, 'Social_Economics', 'AML_HAZMAT'],
                '31social_economic_values': [
                    parameters[31].valueAsText, 'Social_Economics', 'SOC_ECON'
                ]
            }

            # Create a sorted list of input parameters with actual values
            sorted_inputs = sorted([
                item for item in input_params.items()
                if item[1][0] is not None
            ])

            logger.logfile('Raw inputs:', input_params.items())
            logger.logfile('Valid inputs:', sorted_inputs)

            # Verify that there were some inputs
            if len(sorted_inputs) == 0:
                logger.log_all('No Inputs')
                logger.log_all("There are no valid inputs - system exit")
                sys.exit()

            # Get a list of the categories represented in the input data
            input_categories = set([item[1][1] for item in sorted_inputs])
            logger.logfile('Input categories:', input_categories)

            # Create feature datasets: 'Inputs' for copy of input data, 'Results' for outputs
            arcpy.CreateFeatureDataset_management(output_path, "Inputs",
                                                  spatial_ref)
            arcpy.CreateFeatureDataset_management(output_path, "Results",
                                                  spatial_ref)

            logger.console('2.) Dissolving criteria unions')

            # Union each criterion's input layers and dissolve the result
            # NOTE: this deletes attribute data!
            def union_inputs(task):  # task = (name, fc_list)
                name, fc_list = task
                union_output = output_path + "\\Inputs\\" + name
                arcpy.Union_analysis(fc_list, "in_memory\\dissolve")
                arcpy.Dissolve_management("in_memory\\dissolve", union_output)
                arcpy.Delete_management("in_memory\\dissolve")
                return

            # Prep the tasks - makes it easier to read
            tasks = [(id[2:], data[0]) for id, data in sorted_inputs]
            logger.logfile('tasks', tasks)

            # Union
            for task in tasks:
                union_inputs(task)

            # Write inputs to report
            for category in input_categories:
                logger.report("\n" + category.upper() + ":\n")
                for ID, data_list in sorted_inputs:
                    paths = data_list[0].split(";")
                    if data_list[1] == category:
                        logger.report("\t" + ID[2:].upper().replace("_", " ") +
                                      ' - ' + data_list[2] + '\n')
                        for path_name in paths:
                            logger.report("\t\t" + path_name)
                        logger.report("\n")

            # Create a master list of all category fcs that were created for later intersection
            all_fcs_list = []
            for fc in arcpy.ListFeatureClasses(feature_dataset="Inputs"):
                all_fcs_list.append(fc)
            logger.logfile('all_fcs_list', all_fcs_list)

            # For each fc in all_fcs_list, clip the analysis area to the fc,
            # dissolve the clip, and output it as "Restriction_" + fc name
            for fc in all_fcs_list:
                logger.logfile("FC", fc)
                output_fc_name = "Restriction_" + os.path.basename(fc)
                logger.logfile('output_fc_name', output_fc_name)
                output_fc_path = output_path + "\\Results\\" + output_fc_name
                #output_fc_path = output_path+"\\"+output_fc_name
                logger.logfile('output_fc_path', output_fc_path)
                arcpy.Clip_analysis(analysis_area, fc, "in_memory\\clip")
                # Dissolve the clips
                arcpy.Dissolve_management("in_memory\\clip", output_fc_path)

            # Map each criterion to its restriction code
            fc_id_map = defaultdict(str)
            for key, value in sorted_inputs:
                fc_id_map[key[2:]] = value[2]

            # Collapse geometry union [will be slow with full input]
            logger.console('3.) Unioning all criteria inputs')
            output_aggregate_feature = output_path + "\\Aggregate_Results"

            # Add input analysis area to list of union and union it all
            all_fcs_list_copy = copy.deepcopy(all_fcs_list)
            ### Try actual path to Analysis_Area
            #all_fcs_list_copy.append(u'Analysis_Area')
            all_fcs_list_copy.append(analysis_area)
            logger.logfile("all_fcs_list_copy", all_fcs_list_copy)
            arcpy.Union_analysis(all_fcs_list_copy, "in_memory\\agg_union")

            # Clip the union and output it
            arcpy.Clip_analysis("in_memory\\agg_union", analysis_area,
                                "in_memory\\clip_")

            # Make sure everything is in single-part format for later analysis - JIC
            arcpy.MultipartToSinglepart_management("in_memory\\clip_",
                                                   output_aggregate_feature)

            # Create the matrix
            logger.console('4.) Creating matrix')

            # Erase all the other fields - ETFP
            erase_fields_lst = [
                field.name
                for field in arcpy.ListFields(output_aggregate_feature)
            ]
            for field in erase_fields_lst:
                try:
                    arcpy.DeleteField_management(output_aggregate_feature,
                                                 field)
                except:
                    # Should minimally fail on OID, Shape, Shape_area, and Shape_length
                    logger.logfile("Delete field failed:", field)

            # Delete identical features within output_aggregate_feature to prevent double counting
            try:
                arcpy.DeleteIdentical_management(output_aggregate_feature,
                                                 ["SHAPE"])
                logger.logfile("Delete identical succeeded")
            except:
                # This will usually fail - that's ok
                logger.logfile("Delete identical failed")

            # Calculate acres for output_aggregate_field and get acre field name
            acre_field = get_acres(output_aggregate_feature)

            # Create a defaultdict to store acreages - default dictionaries are awesome!
            acreage_counts = defaultdict(int)

            # Iterate across all_fcs_list and add field,
            # select by location and calculate field with ID, remove null
            arcpy.MakeFeatureLayer_management(output_aggregate_feature,
                                              "in_memory\\mem_agg_layer")

            # Create a list to store all added field ids
            fc_field_list = []

            logger.console('5.) Populating matrix')
            for fc in all_fcs_list:
                fc_ID = fc_id_map[str(fc)]

                # Copy the created fields for later use in fields summary
                fc_field_list.append(fc_ID)

                # Add field, select, and calculate
                arcpy.AddField_management("in_memory\\mem_agg_layer",
                                          fc_ID,
                                          "Text",
                                          field_length=20)
                arcpy.SelectLayerByLocation_management(
                    "in_memory\\mem_agg_layer",
                    "WITHIN",
                    fc,
                    selection_type="NEW_SELECTION")

                arcpy.CalculateField_management("in_memory\\mem_agg_layer",
                                                fc_ID, '"' + fc_ID + '"',
                                                "PYTHON_9.3")

                # Get the acres
                fc_acres = sum([
                    row[0] for row in arcpy.da.SearchCursor(
                        "in_memory\\mem_agg_layer", acre_field)
                ])

                # Add key=fc_id and value=acreage to sweet default dictionary
                acreage_counts[fc_ID] = round(fc_acres, 2)

                # Switch the selection
                arcpy.SelectLayerByAttribute_management(
                    "in_memory\\mem_agg_layer", "SWITCH_SELECTION")

                # Clean the table for readability - replace Null with an empty string
                arcpy.CalculateField_management("in_memory\\mem_agg_layer",
                                                fc_ID, '""', "PYTHON_9.3")

                arcpy.SelectLayerByAttribute_management(
                    "in_memory\\mem_agg_layer", "CLEAR_SELECTION")

            # Write the markup feature to disc
            output_aggregate_feature_markup = output_path + "\\" + analysis_id + "_Restrictions_Markup"
            arcpy.CopyFeatures_management("in_memory\\mem_agg_layer",
                                          output_aggregate_feature_markup)

            # Create a summary field and get list of other fields
            arcpy.AddField_management(output_aggregate_feature_markup,
                                      "Summary",
                                      "Text",
                                      field_length=255)

            fc_field_list.append('Summary')
            num_fields = len(fc_field_list)
            with arcpy.da.UpdateCursor(output_aggregate_feature_markup,
                                       fc_field_list) as cur:
                for row in cur:
                    # Join the non-empty restriction codes with single spaces
                    row[num_fields - 1] = " ".join(
                        [val for val in row[:num_fields - 1] if val])
                    cur.updateRow(row)

            # Get the total analysis acreage
            arcpy.MakeFeatureLayer_management(output_aggregate_feature_markup,
                                              "in_memory\\_markup")

            total_analysis_acres = sum([
                row[0] for row in arcpy.da.SearchCursor(
                    "in_memory\\_markup", acre_field)
            ])

            logger.logfile("Total analysis acres", total_analysis_acres)

            # Get the total marked-up acreage
            logger.console('6.) Creating markup output')
            arcpy.SelectLayerByAttribute_management("in_memory\\_markup",
                                                    "NEW_SELECTION",
                                                    """ "Summary" <> '' """)

            total_markup_acres = sum([
                row[0] for row in arcpy.da.SearchCursor(
                    "in_memory\\_markup", acre_field)
            ])

            logger.logfile("Total markup acres", total_markup_acres)

            # Delete the lingering unmarked output
            # Comment out to keep original with original fields
            arcpy.Delete_management(output_aggregate_feature)

            # Partition datasets - Alternative D
            logger.logfile("alternative", alternative)
            if alternative == "ALT_D":

                # Partition the data sets by ecoregion and write outputs to csv
                logger.console('      Partitioning outputs by ecoregions')
                ecoregions = r"T:\CO\GIS\giswork\rgfo\projects\management_plans\ECRMP"\
                             r"\Draft_RMP_EIS\1_Analysis\ECRMP_Outputs\boundaries"\
                             r"\boundaries.gdb\ECRMP_HumanEcoregions_AltD_20160602"

                # Create a default dict to hold the values
                ecoregion_markup_acres = defaultdict(int)

                # Get a list of ecoregions
                ecoregion_field = "Community_Landscape"
                ecoregion_list = [
                    str(row[0]) for row in arcpy.da.SearchCursor(
                        ecoregions, ecoregion_field)
                ]

                logger.logfile("Ecoregion_list", ecoregion_list)

                # These outputs will be created by Split_analysis
                ecoregion_out_names = [
                    output_path + "\\" + er for er in ecoregion_list
                ]
                # Rename to these:
                ecoregion_rename = [
                    output_path + "\\" + analysis_id + "__" + er
                    for er in ecoregion_list
                ]
                logger.logfile("Ecoregion_out_names", ecoregion_out_names)

                arcpy.Split_analysis(output_aggregate_feature_markup,
                                     ecoregions, ecoregion_field, output_path)

                # Rename the ecoregion split outputs
                for old_name, new_name in zip(ecoregion_out_names,
                                              ecoregion_rename):
                    arcpy.Rename_management(old_name, new_name)

                for ecoregion_fc in ecoregion_rename:
                    # Get the acres
                    acre_field = get_acres(ecoregion_fc)
                    arcpy.MakeFeatureLayer_management(ecoregion_fc,
                                                      "in_memory\\ecoregion")
                    arcpy.SelectLayerByAttribute_management(
                        "in_memory\\ecoregion", "NEW_SELECTION",
                        """ "Summary" <> '' """)

                    ecoregion_acres = sum([
                        row[0] for row in arcpy.da.SearchCursor(
                            "in_memory\\ecoregion", acre_field)
                    ])

                    # Add key=fc_id and value=acreage to sweet default dictionary
                    ecoregion_markup_acres[ecoregion_fc] = round(
                        ecoregion_acres, 2)

            # Write outputs acreages to csv
            logger.console('7.) Writing data')
            outCSV = child_folder_path + "\\" + analysis_id_time_stamp + '_Acreage.csv'
            with open(outCSV, 'wb') as csvfile:
                csvwriter = csv.writer(csvfile)
                csvwriter.writerow([
                    "Total Analysis Acres",
                    str(round(total_analysis_acres, 2))
                ])
                csvwriter.writerow(["", ""])
                csvwriter.writerow(["", ""])
                # Write the criteria data
                csvwriter.writerow([
                    'Criteria', analysis_id + "_Raw_Acres",
                    analysis_id + "_Rounded_Acres", "Raw_Percent"
                ])

                for fc_id, acres in sorted(acreage_counts.items()):
                    csvwriter.writerow([
                        fc_id, acres,
                        round(acres, -2),
                        ((acres / total_analysis_acres) * 100)
                    ])

                csvwriter.writerow(["", ""])
                csvwriter.writerow(["", ""])
                # Write the total data
                csvwriter.writerow([
                    "Total " + analysis_id + " Acres",
                    round(total_markup_acres, 2),
                    round(total_markup_acres, -2),
                    (total_markup_acres / total_analysis_acres) * 100
                ])

                if alternative == "ALT_D":
                    # Write the ecoregion data
                    csvwriter.writerow(["", ""])
                    csvwriter.writerow(["", ""])
                    csvwriter.writerow(
                        ['Ecoregion', "Raw_Acres", "Rounded_Acres"])
                    for ecoregion, acres in sorted(
                            ecoregion_markup_acres.items()):
                        csvwriter.writerow(
                            [ecoregion, acres,
                             round(acres, -2)])
                    csvwriter.writerow(["", ""])
                    csvwriter.writerow(["", ""])

            logger.log_all('\nSuccessful completion...\n')

###################################################################################################
##
## EXCEPTIONS
##
###################################################################################################

        except:
            try:
                logger.log_all(
                    '\n\nTOOL - USE RESTRICTIONS DID NOT SUCCESSFULLY COMPLETE'
                )
                logger.console('See logfile for details')
                logger.log_all('Exceptions:\n')
                logger.log_report('_' * 120 + '\n')
                logger.log_all(str(traceback.format_exc()))
            except:
                pass

###################################################################################################
##
## CLEAN-UP
##
###################################################################################################

        finally:
            end_time = datetime.datetime.now()
            try:
                logger.log_all("End Time: " + str(end_time))
                logger.log_all("Time Elapsed: %s" %
                               (str(end_time - start_time)))
            except:
                pass
            deleteInMemory()
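
deleteInMemory, called at the start and in the finally block, is defined outside this excerpt. A minimal sketch, assuming it does nothing more than empty the scratch in_memory workspace:

# Hypothetical sketch of deleteInMemory - the original helper lives
# elsewhere in the toolbox. Deleting the in_memory workspace removes
# every dataset that was written to it.
import arcpy

def deleteInMemory():
    try:
        arcpy.Delete_management("in_memory")
    except Exception:
        pass  # nothing to clean up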