Example #1
def tabulate(omid, source_fc, clip_fc, fields):
    # om_fc_m and recreate_if_exists are defined at module level in the
    # original script
    tab_fc = "TABULATION_OM%s_%s" % (omid, source_fc.replace(
        "in_memory\\", ""))
    exists = arcpy.Exists(tab_fc)

    if exists and recreate_if_exists:
        arcpy.Delete_management(tab_fc)

    if not exists or recreate_if_exists:
        arcpy.TabulateIntersection_analysis(om_fc_m, "PageNumber", clip_fc,
                                            tab_fc, fields)

    arcpy.Delete_management(clip_fc)
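om_fc_m and recreate_if_exists come from the enclosing module, so the function above is not self-contained. A parameterized variant (a sketch; the zone_fc and recreate parameters stand in for those module-level names) might read:

def tabulate_zones(omid, source_fc, clip_fc, fields, zone_fc, recreate=False):
    # zone_fc and recreate replace the module-level om_fc_m / recreate_if_exists
    tab_fc = "TABULATION_OM%s_%s" % (omid, source_fc.replace("in_memory\\", ""))
    if arcpy.Exists(tab_fc):
        if not recreate:
            arcpy.Delete_management(clip_fc)
            return tab_fc
        arcpy.Delete_management(tab_fc)
    arcpy.TabulateIntersection_analysis(zone_fc, "PageNumber", clip_fc,
                                        tab_fc, fields)
    arcpy.Delete_management(clip_fc)
    return tab_fc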
Example #2
    def execute(self, params, messages):
        MarxanDB = params[0].valueAsText
        species_lyr = params[1].valueAsText
        elsubid1 = params[2].valueAsText

        pu_layer = os.path.join(MarxanDB, "pulayer", "pulayer.shp")

        arcpy.env.workspace = "in_memory"

        arcpy.AddMessage("tabulating area")
        pu_fc = arcpy.FeatureClassToFeatureClass_conversion(
            pu_layer, arcpy.env.workspace, "pu_fc")
        tab_area = arcpy.TabulateIntersection_analysis(pu_fc, "id",
                                                       species_lyr, "tab_area",
                                                       elsubid1)

        arcpy.AlterField_management(tab_area, "id", "pu")
        arcpy.AlterField_management(tab_area, elsubid1, "species")
        arcpy.AlterField_management(tab_area, "AREA", "amount")

        puvspr_dat = os.path.join(MarxanDB, "input", "puvspr.dat")

        fields = ["species", "pu", "amount"]

        with open(puvspr_dat, "a+") as f:
            f.write('\t'.join(fields) + '\n')
            with arcpy.da.SearchCursor(tab_area, fields) as cursor:
                for row in cursor:
                    f.write('\t'.join([str(r) for r in row]) + '\n')
        return
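The header-then-rows write pattern here reappears verbatim in Example #4; factored into a helper (write_rows_tsv is a hypothetical name), it might read:

def write_rows_tsv(path, table, fields):
    # Append a header line, then one tab-separated line per table row
    with open(path, "a+") as f:
        f.write('\t'.join(fields) + '\n')
        with arcpy.da.SearchCursor(table, fields) as cursor:
            for row in cursor:
                f.write('\t'.join(str(r) for r in row) + '\n')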
Example #3
def calculateCohesion(fileName):
    percentages = {}
    # Generate tabulate intersection table
    table = arcpy.TabulateIntersection_analysis(
        in_zone_features=fileName,
        zone_fields="FID",
        in_class_features=candidateWards,
        out_table=scratchGDB + "/TabulateIntersection",
        class_fields=wardNameField,
        sum_fields="",
        xy_tolerance="-1 Unknown",
        out_units="UNKNOWN")
    with arcpy.da.SearchCursor(table, ["FID", "PERCENTAGE"]) as tableCursor:
        currentId = None
        # Group the intersection percentages by zone FID (rows come out grouped by zone)
        for row in tableCursor:
            if currentId is None or currentId != row[0]:
                currentId = row[0]
                percentages[currentId] = [row[1]]
            else:
                percentages[currentId].append(row[1])
    # Count, per zone, how many wards cover more than 1% of it, then average the counts
    total = 0
    for row in percentages:
        numSplits = 0
        for percentage in percentages[row]:
            if percentage > 1:
                numSplits += 1

        total += numSplits

    return total * 1.0 / len(percentages)
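The grouping above relies on the tabulate output being ordered by FID; a defaultdict drops that assumption (a sketch reusing the module-level names candidateWards, wardNameField, and scratchGDB from the original):

from collections import defaultdict

def calculateCohesion_grouped(fileName):
    # Same tabulation as above, grouped without relying on row order
    table = arcpy.TabulateIntersection_analysis(
        fileName, "FID", candidateWards,
        scratchGDB + "/TabulateIntersection", wardNameField)
    percentages = defaultdict(list)
    with arcpy.da.SearchCursor(table, ["FID", "PERCENTAGE"]) as cursor:
        for fid, pct in cursor:
            percentages[fid].append(pct)
    # A feature is "split" by every ward covering more than 1% of it
    splits = [sum(1 for p in pcts if p > 1) for pcts in percentages.values()]
    return sum(splits) / float(len(splits))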
Example #4
    def execute(self, params, messages):
        MarxanDB = params[0].valueAsText
        cost = params[1].valueAsText
        stat = params[2].valueAsText
        protected_lands = params[3].valueAsText
        threshold = params[4].valueAsText

        arcpy.env.workspace = "in_memory"

        pulayer = os.path.join(MarxanDB, "pulayer", "pulayer.shp")

        zone_stat = ZonalStatisticsAsTable(pulayer, "id", cost, "zone_stat",
                                           "", stat)
        arcpy.AlterField_management(zone_stat, stat, "cost")

        with arcpy.da.UpdateCursor(zone_stat, "cost") as cursor:
            for row in cursor:
                row[0] = round(row[0], 1)
                cursor.updateRow(row)
        arcpy.JoinField_management(pulayer, "id", zone_stat, "id", "cost")

        dissolve_pad = arcpy.Dissolve_management(protected_lands,
                                                 "dissolve_pad", "", "",
                                                 "MULTI_PART")
        tab_intersect = arcpy.TabulateIntersection_analysis(
            pulayer, "id", dissolve_pad, "tab_intersect")

        arcpy.AddField_management(tab_intersect, "status", "SHORT")
        with arcpy.da.UpdateCursor(tab_intersect,
                                   ["PERCENTAGE", "status"]) as cursor:
            for row in cursor:
                if row[0] >= int(threshold):
                    row[1] = 2
                    cursor.updateRow(row)
                elif row[0] < int(threshold):
                    row[1] = 0
                    cursor.updateRow(row)

        arcpy.JoinField_management(pulayer, "id", tab_intersect, "id",
                                   "status")

        pu_dat = os.path.join(MarxanDB, "input", "pu.dat")

        fields = ["id", "cost", "status"]

        with open(pu_dat, "a+") as f:
            f.write('\t'.join(fields) + '\n')
            with arcpy.da.SearchCursor(pulayer, fields) as cursor:
                for row in cursor:
                    f.write('\t'.join([str(r) for r in row]) + '\n')
        return
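Both branches of the status update above write a constant, so the same classification fits one conditional expression (a sketch reusing the names from this example):

thr = int(threshold)
with arcpy.da.UpdateCursor(tab_intersect, ["PERCENTAGE", "status"]) as cursor:
    for row in cursor:
        # 2 = protected above the coverage threshold, 0 = below it
        row[1] = 2 if row[0] >= thr else 0
        cursor.updateRow(row)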
Example #5
def SVI():  # SVI and Finish

    # To allow overwriting outputs change overwriteOutput option to True.
    arcpy.env.overwriteOutput = False

    fishnet_clip_mod2 = "D:\\SRCFloodVulnerability\\SRCFloodVulnerability.gdb\\fishnet_clip_mod2"
    fishnet_clip = "D:\\SRCFloodVulnerability\\SRCFloodVulnerability.gdb\\fishnet_clip"
    svi = "D:\\SRCFloodVulnerability\\SRCFloodVulnerability.gdb\\svi"
    SRCFloodVulnerability_gdb = "D:\\SRCFloodVulnerability\\SRCFloodVulnerability.gdb"

    # Process: Tabulate Intersection (Tabulate Intersection) 
    svi_fn_intersect = "D:\\SRCFloodVulnerability\\SRCFloodVulnerability.gdb\\svi_fn_intersect"
    arcpy.TabulateIntersection_analysis(in_zone_features=fishnet_clip, zone_fields=["OBJECTID"], in_class_features=svi, out_table=svi_fn_intersect, class_fields=["RPL_THEME1"], sum_fields=[], xy_tolerance="", out_units="UNKNOWN")

    # Process: Add Field (Add Field) 
    svi_fn_intersect_2_ = arcpy.AddField_management(in_table=svi_fn_intersect, field_name="svi_wgt", field_type="SHORT", field_precision=None, field_scale=None, field_length=None, field_alias="svi_wgt", field_is_nullable="NULLABLE", field_is_required="NON_REQUIRED", field_domain="")[0]

    # Process: Calculate Field (Calculate Field) 
    svi_fn_intersect_3_ = arcpy.CalculateField_management(in_table=svi_fn_intersect_2_, field="svi_wgt", expression="calc_wgts(!PERCENTAGE!)", expression_type="PYTHON3", code_block="""def calc_wgts(pct):
    # Rank coverage percentage into five 20-point bins (boundaries fall in the lower bin)
    if pct <= 20:
        return 1
    elif pct <= 40:
        return 2
    elif pct <= 60:
        return 3
    elif pct <= 80:
        return 4
    else:
        return 5""", field_type="TEXT")[0]

    # Process: Add Join (Add Join) 
    fishnet_clip_mod2_Layer = arcpy.AddJoin_management(in_layer_or_view=fishnet_clip_mod2, in_field="OBJECTID", join_table=svi_fn_intersect_3_, join_field="OBJECTID_1", join_type="KEEP_ALL")[0]

    # Process: Feature Class to Feature Class (Feature Class to Feature Class) 
    fishnet_clip_mod3 = arcpy.FeatureClassToFeatureClass_conversion(in_features=fishnet_clip_mod2_Layer, out_path=SRCFloodVulnerability_gdb, out_name="fishnet_clip_mod3", where_clause="", field_mapping="IN_FID \"IN_FID\" true true false 4 Long 0 0,First,#,fishnet_clip_mod2_Layer,fishnet_clip_mod2.IN_FID,-1,-1;FREQUENCY \"FREQUENCY\" true true false 4 Long 0 0,First,#,fishnet_clip_mod2_Layer,fishnet_clip_mod2.FREQUENCY,-1,-1;SUM_IN_FID \"SUM_IN_FID\" true true false 8 Double 0 0,First,#,fishnet_clip_mod2_Layer,fishnet_clip_mod2.SUM_IN_FID,-1,-1;rls_wgt \"rls_wgt\" true true false 2 Short 0 0,First,#,fishnet_clip_mod2_Layer,fishnet_clip_mod2.rls_wgt,-1,-1;OBJECTID \"OBJECTID\" true true false 4 Long 0 0,First,#,fishnet_clip_mod2_Layer,fishnet_clip_mod2.OBJECTID,-1,-1;OBJECTID_1 \"OID\" true true false 4 Long 0 0,First,#,fishnet_clip_mod2_Layer,fishnet_clip_mod2.OBJECTID_1,-1,-1;bfe_bool \"bfe_bool\" true true false 2 Short 0 0,First,#,fishnet_clip_mod2_Layer,fishnet_clip_mod2.bfe_bool,-1,-1;bfe_bool_1 \"bfe_bool\" true true false 2 Short 0 0,First,#,fishnet_clip_mod2_Layer,fishnet_clip_mod2.bfe_bool_1,-1,-1;AREA \"AREA\" true true false 8 Double 0 0,First,#,fishnet_clip_mod2_Layer,fishnet_clip_mod2.AREA,-1,-1;PERCENTAGE \"PERCENTAGE\" true true false 8 Double 0 0,First,#,fishnet_clip_mod2_Layer,fishnet_clip_mod2.PERCENTAGE,-1,-1;bfe_wgt \"bfe_wgt\" true true false 2 Short 0 0,First,#,fishnet_clip_mod2_Layer,fishnet_clip_mod2.bfe_wgt,-1,-1;OBJECTID_12 \"OID\" true true false 4 Long 0 0,First,#,fishnet_clip_mod2_Layer,svi_fn_intersect.OBJECTID_1,-1,-1;RPL_THEME1 \"RPL_THEME1\" true true false 8 Double 0 0,First,#,fishnet_clip_mod2_Layer,svi_fn_intersect.RPL_THEME1,-1,-1;AREA_1 \"AREA\" true true false 8 Double 0 0,First,#,fishnet_clip_mod2_Layer,svi_fn_intersect.AREA,-1,-1;PERCENTAGE_1 \"PERCENTAGE\" true true false 8 Double 0 0,First,#,fishnet_clip_mod2_Layer,svi_fn_intersect.PERCENTAGE,-1,-1;svi_wgt \"svi_wgt\" true true false 2 Short 0 0,First,#,fishnet_clip_mod2_Layer,svi_fn_intersect.svi_wgt,-1,-1", config_keyword="")[0]

    # Process: Calculate Field (2) (Calculate Field) 
    fishnet_clip_mod2_2_ = arcpy.CalculateField_management(in_table=fishnet_clip_mod3, field="svi_wgt", expression="zero_out(!svi_wgt!)", expression_type="PYTHON3", code_block="""def zero_out(flswgt):
    # Replace null weights with 0
    if flswgt is not None:
        return flswgt
    else:
        return 0""", field_type="TEXT")[0]

    # Process: Add Field (4) (Add Field) 
    fishnet_clip_mod3_2_ = arcpy.AddField_management(in_table=fishnet_clip_mod2_2_, field_name="mod_wgt", field_type="FLOAT", field_precision=2, field_scale=None, field_length=None, field_alias="mod_wgt", field_is_nullable="NULLABLE", field_is_required="NON_REQUIRED", field_domain="")[0]

    # Process: Calculate Field (4) (Calculate Field) 
    fishnet_clip_mod3_4_ = arcpy.CalculateField_management(in_table=fishnet_clip_mod3_2_, field="mod_wgt", expression="(!bfe_wgt! + !svi_wgt! + !rls_wgt!)/3", expression_type="PYTHON3", code_block="", field_type="TEXT")[0]
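The five-branch calc_wgts above is a table lookup in disguise; bisect expresses the same 20-point binning directly (a sketch; boundary values fall in the lower bin):

import bisect

def calc_wgts(pct):
    # <= 20 -> 1, <= 40 -> 2, <= 60 -> 3, <= 80 -> 4, > 80 -> 5
    return bisect.bisect_left([20, 40, 60, 80], pct) + 1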
Example #6
def BFE():  # BFE

    # To allow overwriting outputs change overwriteOutput option to True.
    arcpy.env.overwriteOutput = False

    SRCFloodVulnerability_gdb = "G:\\My Drive\\Projects\\SRCFloodVulnerability\\SRCFloodVulnerability.gdb"
    fishnet_clip_mod1 = "D:\\SRCFloodVulnerability\\SRCFloodVulnerability.gdb\\fishnet_clip_mod1"
    fishnet_clip = "D:\\SRCFloodVulnerability\\SRCFloodVulnerability.gdb\\fishnet_clip"
    bfe = "D:\\SRCFloodVulnerability\\SRCFloodVulnerability.gdb\\bfe"
    SRCFloodVulnerability_gdb_2_ = "D:\\SRCFloodVulnerability\\SRCFloodVulnerability.gdb"

    # Process: Tabulate Intersection (Tabulate Intersection) 
    bfe_fn_intersect = "D:\\SRCFloodVulnerability\\SRCFloodVulnerability.gdb\\bfe_fn_intersect"
    arcpy.TabulateIntersection_analysis(in_zone_features=fishnet_clip, zone_fields=["OBJECTID"], in_class_features=bfe, out_table=bfe_fn_intersect, class_fields=["bfe_bool"], sum_fields=["bfe_bool"], xy_tolerance="", out_units="UNKNOWN")

    # Process: Add Field (3) (Add Field) 
    bfe_fn_intersect_2_ = arcpy.AddField_management(in_table=bfe_fn_intersect, field_name="bfe_wgt", field_type="SHORT", field_precision=None, field_scale=None, field_length=None, field_alias="bfe_wgt", field_is_nullable="NULLABLE", field_is_required="NON_REQUIRED", field_domain="")[0]

    # Process: Calculate Field (Calculate Field) 
    bfe_fn_intersect_3_ = arcpy.CalculateField_management(in_table=bfe_fn_intersect_2_, field="bfe_wgt", expression="calc_wgts(!PERCENTAGE!)", expression_type="PYTHON3", code_block="""def calc_wgts(pct):
    # Rank coverage percentage into five 20-point bins (boundaries fall in the lower bin)
    if pct <= 20:
        return 1
    elif pct <= 40:
        return 2
    elif pct <= 60:
        return 3
    elif pct <= 80:
        return 4
    else:
        return 5""", field_type="TEXT")[0]

    # Process: Add Join (Add Join) 
    fishnet_clip_Layer1 = arcpy.AddJoin_management(in_layer_or_view=fishnet_clip_mod1, in_field="OBJECTID", join_table=bfe_fn_intersect_3_, join_field="OBJECTID_1", join_type="KEEP_ALL")[0]

    # Process: Feature Class to Feature Class (Feature Class to Feature Class) 
    fishnet_clip_mod2 = arcpy.FeatureClassToFeatureClass_conversion(in_features=fishnet_clip_Layer1, out_path=SRCFloodVulnerability_gdb_2_, out_name="fishnet_clip_mod2", where_clause="", field_mapping="IN_FID \"IN_FID\" true true false 4 Long 0 0,First,#,fishnet_clip_mod1_Layer,fishnet_clip_mod1.IN_FID,-1,-1;FREQUENCY \"FREQUENCY\" true true false 4 Long 0 0,First,#,fishnet_clip_mod1_Layer,fishnet_clip_mod1.FREQUENCY,-1,-1;SUM_IN_FID \"SUM_IN_FID\" true true false 8 Double 0 0,First,#,fishnet_clip_mod1_Layer,fishnet_clip_mod1.SUM_IN_FID,-1,-1;rls_wgt \"rls_wgt\" true true false 2 Short 0 0,First,#,fishnet_clip_mod1_Layer,fishnet_clip_mod1.rls_wgt,-1,-1;Shape_Length \"Shape_Length\" false true true 8 Double 0 0,First,#,fishnet_clip_mod1_Layer,fishnet_clip_mod1.Shape_Length,-1,-1;Shape_Area \"Shape_Area\" false true true 8 Double 0 0,First,#,fishnet_clip_mod1_Layer,fishnet_clip_mod1.Shape_Area,-1,-1;OBJECTID \"OBJECTID\" false true false 4 Long 0 9,First,#,fishnet_clip_mod1_Layer,bfe_fn_intersect.OBJECTID,-1,-1;OBJECTID_1 \"OID\" true true false 4 Long 0 0,First,#,fishnet_clip_mod1_Layer,bfe_fn_intersect.OBJECTID_1,-1,-1;bfe_bool \"bfe_bool\" true true false 2 Short 0 0,First,#,fishnet_clip_mod1_Layer,bfe_fn_intersect.bfe_bool,-1,-1;bfe_bool_1 \"bfe_bool\" true true false 2 Short 0 0,First,#,fishnet_clip_mod1_Layer,bfe_fn_intersect.bfe_bool_1,-1,-1;AREA \"AREA\" true true false 8 Double 0 0,First,#,fishnet_clip_mod1_Layer,bfe_fn_intersect.AREA,-1,-1;PERCENTAGE \"PERCENTAGE\" true true false 8 Double 0 0,First,#,fishnet_clip_mod1_Layer,bfe_fn_intersect.PERCENTAGE,-1,-1;bfe_wgt \"bfe_wgt\" true true false 2 Short 0 0,First,#,fishnet_clip_mod1_Layer,bfe_fn_intersect.bfe_wgt,-1,-1", config_keyword="")[0]

    # Process: Calculate Field (2) (Calculate Field) 
    fishnet_clip_mod2_2_ = arcpy.CalculateField_management(in_table=fishnet_clip_mod2, field="bfe_wgt", expression="zero_out(!bfe_wgt!)", expression_type="PYTHON3", code_block="""def zero_out(flswgt):
    # Replace null weights with 0
    if flswgt is not None:
        return flswgt
    else:
        return 0""", field_type="TEXT")[0]
Example #7
def zonal_attribution_of_polygon_data(zone_fc, zone_field, class_fc, out_table, class_field, rename_tag=''):

    def rename_to_standard(table):
        arcpy.AddMessage("Renaming.")

        # look up the values based on the rename tag
        this_files_dir = os.path.dirname(os.path.abspath(__file__))
        os.chdir(this_files_dir)
        geo_file = os.path.abspath('../geo_metric_provenance.csv')
        with open(geo_file) as csv_file:
            reader = csv.DictReader(csv_file)
            mapping = {row['subgroup_original_code']: row['subgroup']
                       for row in reader if row['main_feature'] in rename_tag and row['main_feature']}
            arcpy.AddMessage(mapping)

        # update them
        for old, new in mapping.items():
            arcpy.AddMessage(new)
            old_fname = '{}'.format(old)
            new_fname = '{}_{}_pct'.format(rename_tag, new)
            if arcpy.ListFields(table, old_fname):
                try:
                    # same problem with AlterField limit of 31 characters here.
                    arcpy.AlterField_management(table, old_fname, new_fname, clear_field_alias=True)
                except Exception:
                    cu.rename_field(table, old_fname, new_fname, deleteOld=True)
        return table


    arcpy.env.workspace = 'in_memory'
    tab = arcpy.TabulateIntersection_analysis(zone_fc, zone_field, class_fc, 'tab', class_field)
    pivot = arcpy.PivotTable_management(tab, zone_field, class_field, "PERCENTAGE", 'pivot')
    renamed = rename_to_standard(pivot)
    arcpy.CopyRows_management(renamed, out_table)
    # 'renamed' is the same table object as 'pivot', so delete each dataset once
    for item in [tab, pivot]:
        arcpy.Delete_management(item)
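A hypothetical invocation (all paths and field names below are placeholders):

zonal_attribution_of_polygon_data(
    zone_fc=r"C:\data\project.gdb\watersheds",
    zone_field="ZoneID",
    class_fc=r"C:\data\project.gdb\landcover",
    out_table=r"C:\data\project.gdb\landcover_pct",
    class_field="LC_CODE",
    rename_tag="landcover")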
Example #8
    #projList.append(output)
    print "Select by location", clip, "completed at", datetime.datetime.now(
    ).strftime("%I:%M:%S%p")

    ## Tabulate intersection with restricted parameters
    zoneFC = output
    zoneFld = "ADM2_GEOID"
    classFC = clip
    outTab = os.path.join(interFolder, "TabulateIntersect.dbf")
    class_fields = ""
    sum_fields = ""
    xy_tolerance = "-1 Unknown"
    out_unit = "SQUARE_KILOMETERS"

    arcpy.TabulateIntersection_analysis(zoneFC, zoneFld, classFC, outTab,
                                        class_fields, sum_fields, xy_tolerance,
                                        out_unit)

    ## Spatial join
    joinField = "ADM2_GEOID"
    arcpy.MakeFeatureLayer_management(output, "temp")
    arcpy.AddJoin_management("temp", joinField, outTab, joinField, "KEEP_ALL")
    name = os.path.splitext(output)[0]
    ifc = os.path.join(out_gdb, name + "_join")
    arcpy.CopyFeatures_management("temp", ifc)

    ## Delete counties with small areas
    ## Area < 15 km2
    expression = "\"AREA\" < 15"
    expression2 = "\"ADM2_GEOID\" = '84048047'"
    arcpy.MakeFeatureLayer_management(ifc, "temp")
Example #9
dissolveFields = ["AREASYMBOL", "MUSYM"]
#Dissolve Features
arcpy.Dissolve_management(inFCsoils, "outFCDISSOLVE", dissolveFields)

#Add Field

arcpy.AddField_management(
    "outFCDISSOLVE",
    "ACRES",
    "DOUBLE",
)

#Calculate Field

arcpy.CalculateField_management(
    "outFCDISSOLVE",
    "ACRES",
    '!Shape.area@ACRES!',
    "PYTHON_9.3",
)

#Tabulate Intersection
arcpy.TabulateIntersection_analysis(inFCzone, "MLRARSYM", "outFCDISSOLVE",
                                    "tab_int", 'MUSYM', 'ACRES', '', '')

#Table to Excel
arcpy.TableToExcel_conversion("tab_int", out_xls)

#arcpy.Statistics_analysis("tab_int", "STATISTICS", [["ACRES", "SUM"]])
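The commented-out Statistics_analysis call above hints at a per-zone acreage summary; filled out under that assumption (the output name acres_by_zone and the MLRARSYM case field are guesses), it could read:

# Sum the ACRES column of the intersection table per zone symbol
arcpy.Statistics_analysis("tab_int", "acres_by_zone", [["ACRES", "SUM"]],
                          "MLRARSYM")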
Example #10
def polygons_in_zones(zone_fc, zone_field, polygons_of_interest, output_table,
                      interest_selection_expr):
    old_workspace = arcpy.env.workspace
    arcpy.env.workspace = 'in_memory'
    arcpy.SetLogHistory(False)
    arcpy.env.outputCoordinateSystem = arcpy.SpatialReference(102039)
    selected_polys = 'selected_polys'
    # fixes some stupid ArcGIS thing with the interactive Python window
    if arcpy.Exists(selected_polys):
        arcpy.env.overwriteOutput = True

    arcpy.AddMessage('Copying/selecting polygon features...')
    if interest_selection_expr:
        arcpy.Select_analysis(polygons_of_interest, selected_polys,
                              interest_selection_expr)
    else:
        arcpy.CopyFeatures_management(polygons_of_interest, selected_polys)

    # use tabulate intersection for the areas overlapping
    arcpy.AddMessage('Tabulating intersection between zones and polygons...')
    tab_table = arcpy.TabulateIntersection_analysis(
        zone_fc, zone_field, selected_polys, 'tabulate_intersection_table')

    # area was calculated in map units which was m2 so convert to hectares
    arcpy.AddField_management(tab_table, 'Poly_Ha', 'DOUBLE')
    arcpy.CalculateField_management(tab_table, 'Poly_Ha', '!AREA!/10000',
                                    'PYTHON')

    # just change the name of the percent field
    arcpy.AlterField_management(tab_table, 'PERCENTAGE', 'Poly_Pct')

    # Now just get the count as there is no other area metric anymore
    spjoin_fc = arcpy.SpatialJoin_analysis(zone_fc, selected_polys,
                                           'spatial_join_output')
    arcpy.AlterField_management(spjoin_fc, 'Join_Count', 'Poly_n')

    # Add the density
    arcpy.AddField_management(spjoin_fc, 'Poly_nperha', 'DOUBLE')
    arcpy.CalculateField_management(spjoin_fc, 'Poly_nperha',
                                    '!Poly_n!/!shape.area@hectares!', 'PYTHON')

    arcpy.AddMessage('Refining output...')
    arcpy.JoinField_management(tab_table, zone_field, spjoin_fc, zone_field,
                               ["Poly_n", 'Poly_nperha'])
    final_fields = ['Poly_Ha', 'Poly_Pct', 'Poly_n', 'Poly_nperha']

    # make output nice
    arcpy.env.overwriteOutput = False
    cu.one_in_one_out(tab_table, final_fields, zone_fc, zone_field,
                      output_table)

    cu.redefine_nulls(output_table, final_fields, [0, 0, 0, 0])

    # clean up
    # can't delete all of in_memory because this function is meant to be called from another one that uses in_memory
    for item in [selected_polys, tab_table, spjoin_fc]:
        arcpy.Delete_management(item)
    arcpy.env.workspace = old_workspace

    arcpy.AddMessage('Polygons in zones tool complete.')
    arcpy.SetLogHistory(True)
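A hypothetical invocation (paths, field names, and the selection expression are placeholders; cu is the author's utility module):

polygons_in_zones(
    zone_fc=r"C:\gis\project.gdb\watershed_zones",
    zone_field="ZoneID",
    polygons_of_interest=r"C:\gis\project.gdb\wetlands",
    output_table=r"C:\gis\project.gdb\wetlands_in_zones",
    interest_selection_expr="\"WETLAND_TYPE\" = 'bog'")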
Example #11
def main(workspace, areaOfInterest, albertaloticRiparian,
         albertaMergedWetlandInventory, quarterSectionBoundaries,
         parksProtectedAreasAlberta, humanFootprint):

    # Import necessary modules
    import numpy as np
    import arcpy

    # Overwrite output and check out necessary extensions
    arcpy.env.overwriteOutput = True
    arcpy.CheckOutExtension("spatial")

    # assign workspace
    arcpy.env.workspace = workspace

    # First we project our parcel data into the correct projection, create a layer file, then select only parcels we are interested in with Select by Attribute
    # and Select by Location (intersecting the Area of Interest polygon), then export this selection to a new feature class called "ParcelsFinal"

    # Local Variables
    quarterSectionBoundaries_project = "quarterSectionBoundaries_project"
    quarterSectionBoundaries_project_layer = "quarterSectionBoundaries_project_layer"
    ParcelsFinal = "ParcelsFinal"

    # Process: Project
    arcpy.Project_management(
        quarterSectionBoundaries, quarterSectionBoundaries_project,
        "PROJCS['NAD_1983_10TM_AEP_Forest',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-115.0],PARAMETER['Scale_Factor',0.9992],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]",
        "",
        "GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]",
        "NO_PRESERVE_SHAPE", "", "NO_VERTICAL")

    # Process: Make Feature Layer
    arcpy.MakeFeatureLayer_management(
        quarterSectionBoundaries_project,
        quarterSectionBoundaries_project_layer, "", "",
        "OBJECTID OBJECTID VISIBLE NONE;Shape Shape VISIBLE NONE;MER MER VISIBLE NONE;RGE RGE VISIBLE NONE;TWP TWP VISIBLE NONE;SEC SEC VISIBLE NONE;QS QS VISIBLE NONE;RA RA VISIBLE NONE;PARCEL_ID PARCEL_ID VISIBLE NONE;Shape_length Shape_length VISIBLE NONE;Shape_area Shape_area VISIBLE NONE"
    )

    # selects all parcels intersecting the users area of interest
    # Process: Select Layer By Location
    arcpy.SelectLayerByLocation_management(
        quarterSectionBoundaries_project_layer, "INTERSECT", areaOfInterest,
        "", "NEW_SELECTION", "NOT_INVERT")

    # Removes roads from parcel data to ensure that only quarter sections are selected
    # Process: Select Layer By Attribute
    arcpy.SelectLayerByAttribute_management(
        quarterSectionBoundaries_project_layer, "SUBSET_SELECTION",
        "RA NOT LIKE 'R'")

    # Process: Copy Features
    arcpy.CopyFeatures_management(quarterSectionBoundaries_project_layer,
                                  ParcelsFinal, "", "0", "0", "0")

    # ############### ArcGIS MODEL BUILDER SECTION: for initial Geoprocessing #################################################################################################################

    # The following was exported from ArcMap's Model Builder. It performs most of the necessary geoprocessing needed to determine the spatial relationships
    # between the parcels and the user provided data (Human footprint, Lotic (Riparian), Wetlands, Patch Size, and Proximity)

    # local Variables:
    footprint_EXTENT_CLIPPED = "Footprint_Extent_Clipped"
    Footprint_Inverse = "Footprint_Inverse"
    Intact_Area_Per_Parcel = "Intact_Area_Per_Parcel"
    Wetland_Extent_Clipped = "Wetland_Extent_Clipped"
    Wetland_Lines = "Wetland_Lines"
    Wetland_Edge_Per_Parcel = "Wetland_Edge_Per_Parcel"
    Lotic_Extent_Clipped = "Lotic_Extent_Clipped"
    Lotic_No_Wetlands = "Lotic_No_Wetlands"
    Lotic_Area_Per_Parcel = "Lotic_Area_Per_Parcel"
    Area_Of_Interest_Buffered = "Area_Of_Interest_Buffered"
    Footprint_Larger_Extent = "Footprint_Larger_Extent"
    Footprint_INVERSE_Large = "Footprint_INVERSE_Large"
    Footprint_INVERSE_Large_Explode = "Footprint_INVERSE_Large_Explode"

    # Process: Clip
    arcpy.Clip_analysis(humanFootprint, ParcelsFinal, footprint_EXTENT_CLIPPED,
                        "")

    # Process: Erase
    arcpy.Erase_analysis(ParcelsFinal, footprint_EXTENT_CLIPPED,
                         Footprint_Inverse, "")

    #
    # Process: Tabulate Intersection
    arcpy.TabulateIntersection_analysis(ParcelsFinal, "OBJECTID",
                                        Footprint_Inverse,
                                        Intact_Area_Per_Parcel, "", "", "",
                                        "UNKNOWN")

    # Process: Clip (3)
    arcpy.Clip_analysis(albertaMergedWetlandInventory, ParcelsFinal,
                        Wetland_Extent_Clipped, "")

    # Process: Feature To Line
    arcpy.FeatureToLine_management(Wetland_Extent_Clipped, Wetland_Lines, "",
                                   "ATTRIBUTES")
    ##arcpy.FeatureToLine_management("'D:\\evanamiesgalonskiMOBILE\\1 Courses\\329\\Final Project\\DATA\\test results.gdb\\Wetland_Extent_Clipped'", Wetland_Lines, "", "ATTRIBUTES")

    # Process: Tabulate Intersection (2)
    arcpy.TabulateIntersection_analysis(ParcelsFinal, "OBJECTID",
                                        Wetland_Lines, Wetland_Edge_Per_Parcel,
                                        "", "", "", "UNKNOWN")

    # Process: Clip (4)
    arcpy.Clip_analysis(albertaloticRiparian, ParcelsFinal,
                        Lotic_Extent_Clipped, "")

    # Process: Erase (2)
    arcpy.Erase_analysis(Lotic_Extent_Clipped, Wetland_Extent_Clipped,
                         Lotic_No_Wetlands, "")

    # Process: Tabulate Intersection (3)
    arcpy.TabulateIntersection_analysis(ParcelsFinal, "OBJECTID",
                                        Lotic_No_Wetlands,
                                        Lotic_Area_Per_Parcel, "", "", "",
                                        "UNKNOWN")

    # Process: Buffer
    arcpy.Buffer_analysis(areaOfInterest, Area_Of_Interest_Buffered,
                          "50 Kilometers", "FULL", "ROUND", "NONE", "",
                          "PLANAR")

    # Process: Clip (2)
    arcpy.Clip_analysis(humanFootprint, Area_Of_Interest_Buffered,
                        Footprint_Larger_Extent, "")

    # Process: Erase (3)
    arcpy.Erase_analysis(Area_Of_Interest_Buffered, Footprint_Larger_Extent,
                         Footprint_INVERSE_Large, "")

    # Process: Multipart To Singlepart
    arcpy.MultipartToSinglepart_management(Footprint_INVERSE_Large,
                                           Footprint_INVERSE_Large_Explode)

    # ###########################################################################################################################################################################

    # This part of the script edits the newly created tables that contain information about the intersection of Wetlands, Lotic, and Intactness data with the land parcels
    # The Area and Percent coverage fields are renamed to be more descriptive and to ensure there are no confusing duplicate field names in our ParcelsFinal feature class.

    # Alter Field names in intactness table
    arcpy.AlterField_management(Intact_Area_Per_Parcel,
                                "AREA",
                                new_field_name="Area_Intact",
                                field_is_nullable="NULLABLE")
    arcpy.AlterField_management(Intact_Area_Per_Parcel,
                                "PERCENTAGE",
                                new_field_name="Percent_Intact",
                                field_is_nullable="NULLABLE")

    # Alter field names in lotic_table
    arcpy.AlterField_management(Lotic_Area_Per_Parcel,
                                "AREA",
                                new_field_name="Area_Lotic",
                                field_is_nullable="NULLABLE")
    arcpy.AlterField_management(Lotic_Area_Per_Parcel,
                                "PERCENTAGE",
                                new_field_name="Percent_Lotic",
                                field_is_nullable="NULLABLE")

    # Alter Field name in wetlands_table
    arcpy.AlterField_management(Wetland_Edge_Per_Parcel,
                                "LENGTH",
                                new_field_name="Wetland_Edge",
                                field_is_nullable="NULLABLE")

    # Now we will join the desired fields from the 3 tables (intactness, lotic, and wetlands) to the Land Parcel feature class

    # Process: Join Field
    arcpy.JoinField_management(ParcelsFinal, "OBJECTID",
                               Intact_Area_Per_Parcel, "OBJECTID_1",
                               ["Area_Intact", "Percent_Intact"])

    # Process: Join Field (2)
    arcpy.JoinField_management(ParcelsFinal, "OBJECTID", Lotic_Area_Per_Parcel,
                               "OBJECTID_1", ["Area_Lotic", "Percent_Lotic"])

    # Process: Join Field (3)
    arcpy.JoinField_management(ParcelsFinal, "OBJECTID",
                               Wetland_Edge_Per_Parcel, "OBJECTID_1",
                               "Wetland_Edge")

    # Now we get rid of null values in our new fields and replace them with zeros

    with arcpy.da.UpdateCursor(ParcelsFinal, ["Area_Intact"]) as cursor:
        for row in cursor:
            if row[0] == None:
                row[0] = 0
                cursor.updateRow(row)

    with arcpy.da.UpdateCursor(ParcelsFinal, ["Percent_Intact"]) as cursor:
        for row in cursor:
            if row[0] == None:
                row[0] = 0
                cursor.updateRow(row)

    with arcpy.da.UpdateCursor(ParcelsFinal, ["Area_Lotic"]) as cursor:
        for row in cursor:
            if row[0] == None:
                row[0] = 0
                cursor.updateRow(row)

    with arcpy.da.UpdateCursor(ParcelsFinal, ["Percent_Lotic"]) as cursor:
        for row in cursor:
            if row[0] == None:
                row[0] = 0
                cursor.updateRow(row)

    with arcpy.da.UpdateCursor(ParcelsFinal, ["Wetland_Edge"]) as cursor:
        for row in cursor:
            if row[0] == None:
                row[0] = 0
                cursor.updateRow(row)

    # This section of the script calculates the largest intact patch that intersects each parcel

    # Local Variables
    Footprint_INVERSE_Large_Explode = "Footprint_INVERSE_Large_Explode"
    Patch_Sizes_Per_Parcel = "Patch_Sizes_Per_Parcel"

    # Process: Tabulate Intersection
    arcpy.TabulateIntersection_analysis(ParcelsFinal, "OBJECTID",
                                        Footprint_INVERSE_Large_Explode,
                                        Patch_Sizes_Per_Parcel, "SHAPE_Area",
                                        "", "", "UNKNOWN")

    # A table was created with Tabulate Intersection that contains the areas of all intact patches that intersect
    # each parcel. We have several duplicates of each Parcel OBJECTID in this table, one for every patch that intersects a parcel.
    # we need to determine which duplicate OBJECTID corresponds to the largest patch area.

    # First we get a full list of the object IDs in our clipped ParcelsFinal class
    # cursor rows are tuples even for a single field, so we extract the value out of each
    parcel_IDs_extracted = []
    with arcpy.da.SearchCursor(ParcelsFinal, "OBJECTID") as parcel_IDs:
        for ID in parcel_IDs:
            parcel_IDs_extracted.append(ID[0])

    Patch_Sizes_Per_Parcel = "Patch_Sizes_Per_Parcel"

    ##    # remove null values
    ##    with arcpy.da.UpdateCursor(Patch_Sizes_Per_Parcel, ["SHAPE_Area"]) as cursor:
    ##        for row in cursor:
    ##            if row[0] == None:
    ##                row[0] = 0
    ##                cursor.updateRow(row)

    # Now we get a full list of all of the Parcel Object IDs that had at least one intersection with the "Intact" feature class (human footprint inverse)
    # NOTE: not all of the parcels in our area of interest necessarily intersect with the "Intact" feature class
    patch_IDs_extracted = []
    with arcpy.da.SearchCursor(Patch_Sizes_Per_Parcel, "OBJECTID_1") as patch_IDs:
        for ID in patch_IDs:
            patch_IDs_extracted.append(ID[0])

    # initialize 2 new lists
    orderedListofLists = []
    newlist = []
    # for each OBJECT ID we create a list of areas which are the intersections for a parcel, then append that list as an element in our list of lists (orderedListofLists)
    # the newlist is re-initialized every iteration after it has dumped its values into the orderedListofLists. The orderedListofLists is not re-initialized, and continues to be appended to.
    # Now the intersections for each parcel are nicely grouped together
    for ID in parcel_IDs_extracted:
        patch_IDs_and_Areas = arcpy.da.SearchCursor(
            Patch_Sizes_Per_Parcel, ["OBJECTID_1", "SHAPE_Area"])
        if ID not in patch_IDs_extracted:  # This step ensures that parcels that have no intersection receive a zero instead of being glossed over. This maintains the order of our field values.
            orderedListofLists.append(0)
        else:
            newlist = []
            for rows in patch_IDs_and_Areas:
                if ID == rows[0]:
                    x = rows[1]
                    newlist.append(x)
            orderedListofLists.append(newlist)

    # initialize one more list
    # Since the intersections (areas) are grouped by parcel, we extract the highest number in each list element (which is a list), and this gives us the largest patch size for each parcel.
    max_patch_size_per_parcel = []

    for patchSizes in orderedListofLists:
        if patchSizes == 0:
            max_patch_size_per_parcel.append(0)
        else:
            max_patch_size_per_parcel.append(max(patchSizes))
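    # NOTE (sketch): the same result can be computed in one cursor pass with a
    # dict keyed by parcel ID, instead of re-opening a cursor for every parcel:
    #   largest = {}
    #   with arcpy.da.SearchCursor(Patch_Sizes_Per_Parcel,
    #                              ["OBJECTID_1", "SHAPE_Area"]) as cur:
    #       for oid, area in cur:
    #           largest[oid] = max(area, largest.get(oid, 0))
    #   max_patch_size_per_parcel = [largest.get(pid, 0)
    #                                for pid in parcel_IDs_extracted]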

    # convert to acres for scoring
    max_patch_size_per_parcel_acres = []
    acre = 0
    for patchsize in max_patch_size_per_parcel:
        acre = patchsize / 4046.86
        max_patch_size_per_parcel_acres.append(acre)

    # Now we have a list that contains the largest patch that intersects each parcel.
    # It is ordered the same as the OBJECTID and we can now create a new field in the parcels feature class and
    # iteratively populate the rows with each patch area value

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "Largest_Patch_Area",
                              "DOUBLE",
                              field_length=50)

    # initialize x
    x = 0

    # use update cursor to populate rows and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal, "Largest_Patch_Area") as cursor:
        for row in cursor:
            row[0] = max_patch_size_per_parcel_acres[x]
            cursor.updateRow(row)
            x += 1

    # the following code calculates the nearest protected area feature and automatically creates a new field that contains that distance for each parcel.
    # Process: Near
    arcpy.Near_analysis(ParcelsFinal, parksProtectedAreasAlberta, "",
                        "NO_LOCATION", "NO_ANGLE", "PLANAR")

    # #######################################################################################################################################################################################################

    # The next section of code calculates the scores for each parcel based on the values in our newly added/created fields.

    # ##################### INTACTNESS SCORE #######################

    # extract percent intact field
    intact_scores = []
    percent_intact = arcpy.da.SearchCursor(ParcelsFinal, "Percent_Intact")
    # Perform calculation for score and append to new list. Accommodate str and tuple field types
    for percent in percent_intact:
        if isinstance(percent, tuple):
            percent = percent[0] / 100
        elif isinstance(percent, str):
            percent = float(percent)
        intact_scores.append(percent)

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "SCORE_Intactness",
                              "DOUBLE",
                              field_length=50)

    x = 0

    # use update cursor to populate rows with list element and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal, "SCORE_Intactness") as cursor:
        for row in cursor:
            row[0] = intact_scores[x]
            cursor.updateRow(row)
            x += 1

    # ################### Lotic (Riparian) Score #########################

    # extract percent lotic field
    lotic_percent_list = []
    percent_lotic = arcpy.da.SearchCursor(ParcelsFinal, "Percent_Lotic")
    #  Accommodate str and tuple field types
    for percent in percent_lotic:
        if isinstance(percent, tuple):
            percent = percent[0]
        elif isinstance(percent, str):
            percent = float(percent)
        lotic_percent_list.append(percent)

    # now we create a lotic percent list with no zeros before establishing ranges for deciles
    lotic_percent_list_noZero = []
    for percent in lotic_percent_list:
        if percent != 0:
            lotic_percent_list_noZero.append(percent)

    # use numpy to calculate the decile ranges
    ranges = np.percentile(lotic_percent_list_noZero, np.arange(0, 100, 10))

    # iterate through the original lotic percent list and use the decile ranges to bin the lotic percent values to the appropriate scores
    final_lotic_scores = []
    for percent in lotic_percent_list:
        if percent == 0:
            final_lotic_scores.append(0)
        elif percent >= ranges[0] and percent <= ranges[1]:
            final_lotic_scores.append(0.1)
        elif percent >= ranges[1] and percent <= ranges[2]:
            final_lotic_scores.append(0.2)
        elif percent >= ranges[2] and percent <= ranges[3]:
            final_lotic_scores.append(0.3)
        elif percent >= ranges[3] and percent <= ranges[4]:
            final_lotic_scores.append(0.4)
        elif percent >= ranges[4] and percent <= ranges[5]:
            final_lotic_scores.append(0.5)
        elif percent >= ranges[5] and percent <= ranges[6]:
            final_lotic_scores.append(0.6)
        elif percent >= ranges[6] and percent <= ranges[7]:
            final_lotic_scores.append(0.7)
        elif percent >= ranges[7] and percent <= ranges[8]:
            final_lotic_scores.append(0.8)
        elif percent >= ranges[8] and percent <= ranges[9]:
            final_lotic_scores.append(0.9)
        elif percent >= ranges[9]:
            final_lotic_scores.append(1)
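    # NOTE (sketch): numpy can perform this decile binning directly:
    #   bins = np.percentile(lotic_percent_list_noZero, np.arange(10, 100, 10))
    #   final_lotic_scores = [0 if p == 0 else (np.digitize(p, bins) + 1) / 10.0
    #                         for p in lotic_percent_list]
    # (np.digitize counts the bin edges at or below p, giving scores 0.1 to 1.0)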

    # the order of the resulting list is identical to the original, so it can be appended as a new field and the values will
    # correspond with the rows they are meant to score

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "SCORE_Lotic_Deciles",
                              "DOUBLE",
                              field_length=50)

    x = 0

    # use update cursor to populate rows with list element and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal, "SCORE_Lotic_Deciles") as cursor:
        for row in cursor:
            row[0] = final_lotic_scores[x]
            cursor.updateRow(row)
            x += 1

    # ######################### Wetland Score #####################

    # extract Wetland edge length field
    wetland_edge_list = []
    wetland_field = arcpy.da.SearchCursor(ParcelsFinal, "Wetland_Edge")

    # append values to new list. Accommodate str and tuple field types.
    for length in wetland_field:
        if isinstance(length, tuple):
            length = length[0]
        elif isinstance(length, str):
            length = float(length)
        wetland_edge_list.append(length)

    # now we create a wetland edge list with no zeros before establishing ranges for deciles
    wetland_edge_list_noZero = []
    for edge_length in wetland_edge_list:
        if edge_length != 0:
            wetland_edge_list_noZero.append(edge_length)

    # use numpy to calculate the decile ranges
    ranges = np.percentile(wetland_edge_list_noZero, np.arange(0, 100, 10))

    # iterate through original wetland edge list and use the decile ranges to bin the wetland edge values to the appropriate scores
    final_wetland_scores = []
    for edge_length in wetland_edge_list:
        if edge_length == 0:
            final_wetland_scores.append(0)
        elif edge_length >= ranges[0] and edge_length <= ranges[1]:
            final_wetland_scores.append(0.1)
        elif edge_length >= ranges[1] and edge_length <= ranges[2]:
            final_wetland_scores.append(0.2)
        elif edge_length >= ranges[2] and edge_length <= ranges[3]:
            final_wetland_scores.append(0.3)
        elif edge_length >= ranges[3] and edge_length <= ranges[4]:
            final_wetland_scores.append(0.4)
        elif edge_length >= ranges[4] and edge_length <= ranges[5]:
            final_wetland_scores.append(0.5)
        elif edge_length >= ranges[5] and edge_length <= ranges[6]:
            final_wetland_scores.append(0.6)
        elif edge_length >= ranges[6] and edge_length <= ranges[7]:
            final_wetland_scores.append(0.7)
        elif edge_length >= ranges[7] and edge_length <= ranges[8]:
            final_wetland_scores.append(0.8)
        elif edge_length >= ranges[8] and edge_length <= ranges[9]:
            final_wetland_scores.append(0.9)
        elif edge_length >= ranges[9]:
            final_wetland_scores.append(1)

    # the order of the resulting list is identical to the original, so it can be appended as a new field and the values will
    # correspond with the rows they are meant to score

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "SCORE_Wetland_Deciles",
                              "DOUBLE",
                              field_length=50)

    x = 0

    # use update cursor to populate rows with list element and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal,
                               "SCORE_Wetland_Deciles") as cursor:
        for row in cursor:
            row[0] = final_wetland_scores[x]
            cursor.updateRow(row)
            x += 1

    # ################ Patch size score ####################

    # extract patch size field
    largest_patch_sizes = []
    patch_sizes = arcpy.da.SearchCursor(ParcelsFinal, "Largest_Patch_Area")
    # Perform calculation for score and append to new list. Accommodate str and tuple field types
    for size in patch_sizes:
        if isinstance(size, tuple):
            size = size[0]
        elif isinstance(size, str):
            size = float(size)
        largest_patch_sizes.append(size)

    # now we populate a new list and assign scores based on number ranges
    # (boundary values fall into the lower bin so every parcel gets a score)
    patch_size_scores = []
    for size in largest_patch_sizes:
        if size < 160:
            patch_size_scores.append(0)
        elif size < 2500:
            patch_size_scores.append(0.5)
        elif size < 10000:
            patch_size_scores.append(.75)
        else:
            patch_size_scores.append(1)

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "SCORE_Patch_Size",
                              "DOUBLE",
                              field_length=50)

    x = 0

    # use update cursor to populate rows with list element and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal, "SCORE_Patch_Size") as cursor:
        for row in cursor:
            row[0] = patch_size_scores[x]
            cursor.updateRow(row)
            x += 1

    # ############### Proximity Score #####################

    # Rename Distance field to be more descriptive
    # delete NEAR_FID field (not needed)
    arcpy.AlterField_management(ParcelsFinal,
                                "NEAR_DIST",
                                new_field_name="Dist_to_Protected",
                                field_is_nullable="NULLABLE")
    arcpy.DeleteField_management(ParcelsFinal, "NEAR_FID")

    # extract proximity field
    all_proximities = []
    proximities = arcpy.da.SearchCursor(ParcelsFinal, "Dist_to_Protected")
    # Perform calculation for score and append to new list. Accommodate str and tuple field types
    for proximity in proximities:
        if isinstance(proximity, tuple):
            proximity = proximity[0]
        elif isinstance(proximity, str):
            proximity = float(proximity)
        all_proximities.append(proximity)

    # now we populate a new list and assign scores based on number ranges
    # (boundary values fall into the nearer bin so every parcel gets a score)
    proximity_scores = []
    for proximity in all_proximities:
        if proximity == 0:
            proximity_scores.append(1)
        elif proximity < 2000:
            proximity_scores.append(0.75)
        elif proximity < 4000:
            proximity_scores.append(.5)
        else:
            proximity_scores.append(0)

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "SCORE_Proximity",
                              "DOUBLE",
                              field_length=50)

    x = 0

    # use update cursor to populate rows with list element and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal, "SCORE_Proximity") as cursor:
        for row in cursor:
            row[0] = proximity_scores[x]
            cursor.updateRow(row)
            x += 1

    # ##################### FINAL PRIORITY SCORES ###########################

    sumOfScores = []
    scoreFields = arcpy.da.SearchCursor(ParcelsFinal, [
        "SCORE_Lotic_Deciles", "SCORE_Wetland_Deciles", "SCORE_Intactness",
        "SCORE_Patch_Size", "SCORE_Proximity"
    ])
    for score in scoreFields:
        sumScore = score[0] + score[1] + score[2] + score[3] + score[4]
        sumOfScores.append(sumScore)

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "PRIORITY_SCORE",
                              "DOUBLE",
                              field_length=50)

    x = 0

    # use update cursor to populate rows with list element and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal, "PRIORITY_SCORE") as cursor:
        for row in cursor:
            row[0] = sumOfScores[x]
            cursor.updateRow(row)
            x += 1
    # the order of the resulting list is identical to the original, so it can be appended as a new field and the values will
    # correspond with the rows they are meant to score

    # ################################## PRIORITY RANKING #######################################

    # now we calculate ranges for priority ranking with 4 breaks (Quartiles)
    ranges = np.percentile(sumOfScores, np.arange(0, 100, 25))

    final_priority_ranking = []
    for score in sumOfScores:
        if score >= ranges[0] and score <= ranges[1]:
            final_priority_ranking.append(None)
        elif score >= ranges[1] and score <= ranges[2]:
            final_priority_ranking.append(3)
        elif score >= ranges[2] and score <= ranges[3]:
            final_priority_ranking.append(2)
        elif score >= ranges[3]:
            final_priority_ranking.append(1)

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "PRIORITY_RANKING",
                              "DOUBLE",
                              field_length=50)

    x = 0

    # use update cursor to populate rows with list element and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal, "PRIORITY_RANKING") as cursor:
        for row in cursor:
            row[0] = final_priority_ranking[x]
            cursor.updateRow(row)
            x += 1

    arcpy.CheckInExtension("spatial")

    print("proccess complete")
    print("...........")
    print(
        "The resulting priority scored parcels feature class can be found in the user specified geodatabase by the name of 'ParcelsFinal'"
    )
    print(
        "To view the Conservation Priority ranking, symbolize the feature class by unique values, using the 'PRIORITY_RANKING' field."
    )
Example #12
##here we are selecting out all of the coastal and marine MPAs
arcpy.Select_analysis(WDPA_Oct2018_shapefile_polygons_shp,
                      WDPA_Oct2018_shapefile_polygons__C_M_shp,
                      "MARINE = '1' Or MARINE = '2'")

# Process: Dissolve (3)
##dissolve the above so that the tabulate intersection won't double count the area of overlapping MPAs
arcpy.Dissolve_management(
    WDPA_Oct2018_shapefile_polygons__C_M_shp,
    WDPA_Oct2018_shapefile_polygons__C_M_dissolved_shp__2_, "PA_DEF", "",
    "MULTI_PART", "DISSOLVE_LINES")

# Process: Tabulate Intersection
##this tool calculates the area and percentage of each grid cell that is covered by an MPA and places them in a table
arcpy.TabulateIntersection_analysis(
    worldsq_EA_shp, "Seq",
    WDPA_Oct2018_shapefile_polygons__C_M_dissolved_shp__2_,
    grid_all_CM_area_dissolved, "", "", "", "UNKNOWN")

#Repeat for no take MPAs

# Process: Select (5)
arcpy.Select_analysis(WDPA_Oct2018_shapefile_polygons__C_M_shp,
                      WDPA_Oct2018_shapefile_polygons_C_M_notake_shp,
                      "NO_TAKE = 'All'")

# Process: Dissolve (4)
arcpy.Dissolve_management(
    WDPA_Oct2018_shapefile_polygons_C_M_notake_shp,
    WDPA_Oct2018_shapefile_polygons__C_M_notake_dissolved_shp__2_, "PA_DEF",
    "", "MULTI_PART", "DISSOLVE_LINES")
# Obtain the impact information using the Tabulate intersection
############################################################################

############################################################################
# Loop through each impact layer and create the intersection table
#############################################################################
for Table in xrange(len(ImpactLayers)):
    # Get the cross tabulation values and output the results as a table
    InZoneFeatures = "Barbuda_FINAL_InundationClass_NEWDTM_PolygonV1"
    OutputTable = "Barbuda_InundationTable_" + ImpactLayers[Table]

    SUM_FIELDS = ""
    ImpactTable = arcpy.TabulateIntersection_analysis(
        in_zone_features=InZoneFeatures,
        zone_fields="gridcode",
        in_class_features=ImpactLayers[Table],
        out_table=OutputTable,
        class_fields=ImpactLayerClassList[Table],
        sum_fields=SUM_FIELDS,
        xy_tolerance="-1 Unknown",
        out_units="UNKNOWN")
    # Output the table as csv file for further processing
    arcpy.TableToTable_conversion(in_rows=ImpactTable,
                                  out_path=OutputCSVFileFolder,
                                  out_name=OuputTable + ".csv")
    arcpy.Delete_management(ImpactTable)
################################################################################
# Successful 25/02/20 and rerun 27/03/20 due to changes to the polygons
#
################################################################################
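xrange ties the loop above to Python 2; under Python 3, zip pairs each impact layer with its class field directly (a sketch using the same globals):

for impact_layer, class_field in zip(ImpactLayers, ImpactLayerClassList):
    output_table = "Barbuda_InundationTable_" + impact_layer
    impact_table = arcpy.TabulateIntersection_analysis(
        in_zone_features="Barbuda_FINAL_InundationClass_NEWDTM_PolygonV1",
        zone_fields="gridcode",
        in_class_features=impact_layer,
        out_table=output_table,
        class_fields=class_field,
        sum_fields="",
        xy_tolerance="-1 Unknown",
        out_units="UNKNOWN")
    # Output the table as a csv file for further processing, then clean up
    arcpy.TableToTable_conversion(impact_table, OutputCSVFileFolder,
                                  output_table + ".csv")
    arcpy.Delete_management(impact_table)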
Example #14
        MyFunctions.check_and_add_field(out_map, "BaseID_CROME", "LONG", 0)
        arcpy.CalculateField_management(out_map, "BaseID_CROME", "!OBJECTID!",
                                        "PYTHON_9.3")

        print("  Identifying farmland and amenity grass")
        # 'Natural surface' is mainly road verges and amenity grass in urban areas
        arcpy.MakeFeatureLayer_management(out_map, "ag_lyr")
        expression = Hab_field + " IN ('Agricultural land', 'Natural surface')"
        arcpy.SelectLayerByAttribute_management("ag_lyr",
                                                where_clause=expression)

        print(
            "  Calculating percentage of farmland and amenity features within CROME polygons"
        )
        arcpy.TabulateIntersection_analysis(
            "ag_lyr", ["OBJECTID", Hab_field, "BaseID_CROME", "Shape_Area"],
            CROME_data, "CROME_TI",
            ["lucode", "Land_Use_Description", "Simple", "Shape_Area"])

        # Sorting TI table by size so that larger intersections are first in the list
        print("  Sorting table with largest intersections first")
        arcpy.Sort_management("CROME_TI", "CROME_TI_sort",
                              [["AREA", "DESCENDING"]])
        # Delete all but the largest intersection. We need to do this, otherwise the join later is not robust - the wrong rows can be
        # copied across even if we think we have selected and joined to the right rows.
        print("Deleting identical (smaller intersection) rows")
        arcpy.DeleteIdentical_management("CROME_TI_sort", ["OBJECTID_1"])

        # Adding fields for CROME data
        out_map = "OSMM_CROME"
        print("Adding new fields for CROME data")
        MyFunctions.check_and_add_field(out_map, "CROME_desc", "TEXT", 50)
Example #15
    'dec00_va79', 'dec00_va80', 'dec00_va81', 'dec00_va82', 'dec00_va83',
    'dec00_va84', 'dec00_va85', 'dec00_va86', 'dec00_va87', 'dec00_va88',
    'dec00_va89', 'dec00_va90', 'dec00_va91', 'dec00_va92', 'dec00_va93',
    'dec00_va94'
]
#The attribute field or fields that will be used to define zones
# More fields than those above could be used; in this demo we use the fields listed
in_class_features = "DC_Census2010.shp"  #this is after-changed shp
out_table = os.path.join(inputSpace, outPutFile)
class_fields = ["GEO_ID"
                ]  #The attribute field or fields used to define classes
try:
    os.remove(out_table)
except OSError:
    pass
arcpy.TabulateIntersection_analysis(in_zone_features, zone_fields,
                                    in_class_features, out_table, class_fields)

print "complete"


################################################################################################################################################
#%%
#Part 2: this function converts the tabulate intersection result into csv
def dbf_to_csv(out_table):
    csv_fn = out_table[:-4] + ".csv"  #Set the table as .csv format
    try:
        os.remove(csv_fn)
    except OSError:
        pass
    with open(csv_fn,
              'wb') as csvfile:  #Create a csv file and write contents from dbf
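The function body is cut off above; a self-contained variant of the same idea (a sketch that reads the table through a cursor instead of a DBF reader) could be:

import csv

def table_to_csv(table, csv_fn):
    # Write every field of an ArcGIS table out as comma-separated text
    fields = [f.name for f in arcpy.ListFields(table)]
    with open(csv_fn, "wb") as csvfile:  # Python 2; use "w", newline="" on Python 3
        writer = csv.writer(csvfile)
        writer.writerow(fields)
        with arcpy.da.SearchCursor(table, fields) as cursor:
            for row in cursor:
                writer.writerow(row)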
Example #16
        arcpy.RepairGeometry_management(arcpy.env.scratchGDB + "/" + "Cat%s_to_erase" % (cat))
        print("repaired IUCN category %s at: %s" % (cat, datetime.datetime.now().time()))
        # a feature has a single IUCN_CAT value, so the multi-category filter needs OR
        arcpy.Select_analysis("allIUCN", arcpy.env.scratchGDB + "/" + "CatIatoV_to_diss", "IUCN_CAT = 'Ia' OR IUCN_CAT = 'Ib' OR IUCN_CAT = 'II' OR IUCN_CAT = 'III' OR IUCN_CAT = 'IV' OR IUCN_CAT = 'V'")
        print("selected IUCN categories Ia to V at: %s" % datetime.datetime.now().time())
        arcpy.Dissolve_management(arcpy.env.scratchGDB + "/" + "CatIatoV_to_diss", arcpy.env.scratchGDB + "/" + "CatIatoV_to_erase", "ISO3", "", "MULTI_PART")
        print("dissolved IUCN categories Ia to V at: %s" % datetime.datetime.now().time())
        arcpy.Delete_management(arcpy.env.scratchGDB + "/" + "CatIatoV_to_diss")
        arcpy.RepairGeometry_management(arcpy.env.scratchGDB + "/" + "CatIatoV_to_erase")
        print("repaired IUCN categories Ia to V at: %s" % datetime.datetime.now().time())
        arcpy.Erase_analysis(arcpy.env.scratchGDB + "/" + "Cat%s_to_erase" % (cat), arcpy.env.scratchGDB + "/" + "CatIatoV_to_erase", arcpy.env.scratchGDB + "/" + "Cat%s_to_intrsec" % (cat))
        print("erased IUCN category %s at: %s" % (cat, datetime.datetime.now().time()))
        arcpy.Delete_management(arcpy.env.scratchGDB + "/" + "CatIatoV_to_erase")
        arcpy.RepairGeometry_management(arcpy.env.scratchGDB + "/" + "Cat%s_to_intrsec" % (cat))
        print("repaired IUCN category %s at: %s" % (cat, datetime.datetime.now().time()))
    arcpy.Delete_management(arcpy.env.scratchGDB + "/" + "Cat%s_to_erase" % (cat))
    arcpy.TabulateIntersection_analysis("countries", "ISO", arcpy.env.scratchGDB + "/" + "Cat%s_to_intrsec" % (cat), arcpy.env.scratchGDB + "/" + "y_tab_%s" % (cat))
    print("tabulated %s PA intersection with countries at: %s" % (cat, datetime.datetime.now().time()))
    with arcpy.da.UpdateCursor("countries",["ISO", "Shape_Area", "PAcat%s"%(cat)]) as upd_cur:
        for upd_row in upd_cur:
            with arcpy.da.SearchCursor(arcpy.env.scratchGDB + "/" + "y_tab_%s"%(cat),["ISO", "PERCENTAGE"]) as search_cur:
                for search_row in search_cur:
                    if search_row[0] == upd_row[0]:
                        upd_row[2] = search_row[1]
                        upd_cur.updateRow(upd_row)
                if not upd_row[2]:
                    upd_row[2] = 0
                    upd_cur.updateRow(upd_row)
    arcpy.Delete_management(arcpy.env.scratchGDB + "/" + "y_tab_%s"%(cat))

arcpy.Delete_management(arcpy.env.scratchGDB + "/" + "CatVI_to_intrsec" )
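The nested-cursor update above rescans the tabulate table once per country row; a dictionary lookup is a common faster variant. A sketch under the same field-name assumptions, to be run before the y_tab table is deleted:

# Build {ISO: PERCENTAGE} once, then update countries in a single pass.
perc_by_iso = {row[0]: row[1] for row in arcpy.da.SearchCursor(
    arcpy.env.scratchGDB + "/" + "y_tab_%s" % (cat), ["ISO", "PERCENTAGE"])}
with arcpy.da.UpdateCursor("countries", ["ISO", "PAcat%s" % (cat)]) as upd_cur:
    for upd_row in upd_cur:
        # Default to 0 for countries with no intersection row
        upd_row[1] = perc_by_iso.get(upd_row[0], 0)
        upd_cur.updateRow(upd_row)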
예제 #17
0
              (entry, item))
        arcpy.AddField_management(item, entry, "DOUBLE", "", "",
                                  "", alias[1], "", "", "")
        print("%s added to attribute table of %s" % (entry, item))
    else:
        print("Adding field: %s to %s attribute table" %
              (entry, item))
        arcpy.AddField_management(item, entry, "DOUBLE", "", "",
                                  "", alias[2], "", "", "")
        print("%s added to attribute table of %s" % (entry, item))
for feature in class_feat:
    if feature == class_feat[0]:  ## Stream_Road_Xings
        print(
            "Tabulating road and stream intersections for %s \nresults in %s"
            % (item, xing_table_out[0]))
        arcpy.TabulateIntersection_analysis(
            item, bound_field[0], feature, xing_table_out[0])
        print("%s created in %s\n\n" %
              (xing_table_out[0], out_gdb_path))
        ## Change tabulate intersection output field name from PNT_COUNT to 'Stream_Road_Xings'
        fieldlist = arcpy.ListFields(xing_table_out[0])
        for field in fieldlist:
            if field.name == miles_table[0]:
                print("Changing %s to %s" %
                      (field.name, stream_road_field[0]))
                arcpy.AlterField_management(
                    xing_table_out[0], field.name,
                    stream_road_field[0], stream_road_field[1])
                print("%s changed to %s in %s" %
                      (miles_table[0], stream_road_field[0],
                       xing_table_out[0]))
            else:
예제 #18
0
def shiftAlignToFlow(inFeats,
                     outFeats,
                     fldID,
                     in_hydroNet,
                     in_Catch,
                     fldLevel="StreamLeve",
                     scratchGDB="in_memory"):
    '''Shifts features to align with flowlines, with preference for primary flowlines over tributaries.
   Incorporates variation on code found here: https://arcpy.wordpress.com/2012/11/15/shifting-features/
   
   Parameters:
   - inFeats: Input features to be shifted
   - outFeats: Output shifted features
   - fldID: Field in inFeats containing unique IDs
   - in_hydroNet: Input hydrological network dataset
   - in_Catch: Input catchments from NHDPlus, assumed to correspond with data in in_hydroNet
   - fldLevel: Field in the flowlines indicating the stream level; lower values indicate the mainstem (assumed "StreamLeve" by default)
   - scratchGDB: Geodatabase for storing intermediate outputs (assumed in_memory by default)
   '''

    # Set up some variables
    descHydro = arcpy.Describe(in_hydroNet)
    nwDataset = descHydro.catalogPath
    catPath = os.path.dirname(
        nwDataset)  # This is where hydro layers will be found
    nhdFlowline = catPath + os.sep + "NHDFlowline"
    nhdArea = catPath + os.sep + "NHDArea"
    nhdWaterbody = catPath + os.sep + "NHDWaterbody"
    minFld = "MIN_%s" % fldLevel

    # Make a copy of input features, and add a field to store alignment type
    tmpFeats = scratchGDB + os.sep + "tmpFeats"
    arcpy.CopyFeatures_management(inFeats, tmpFeats)
    inFeats = tmpFeats
    arcpy.AddField_management(inFeats, "AlignType", "TEXT", "", "", 1)

    # # Get (pseudo-)centroid of features to be shifted
    # centroids = scratchGDB + os.sep + "centroids"
    # arcpy.FeatureToPoint_management(inFeats, centroids, "INSIDE")

    # Make feature layers
    lyrFeats = arcpy.MakeFeatureLayer_management(inFeats, "lyr_inFeats")
    lyrFlowlines = arcpy.MakeFeatureLayer_management(nhdFlowline,
                                                     "lyr_Flowlines")
    lyrCatch = arcpy.MakeFeatureLayer_management(in_Catch, "lyr_Catchments")

    qry = "FType = 460"  # StreamRiver only
    lyrStreamRiver = arcpy.MakeFeatureLayer_management(nhdArea,
                                                       "StreamRiver_Poly", qry)

    qry = "FType = 390 OR FType = 436"  # LakePond or Reservoir only
    lyrLakePond = arcpy.MakeFeatureLayer_management(nhdWaterbody,
                                                    "LakePondRes_Poly", qry)

    ### Assign features to stream or river (wide-water) alignment processes
    # # Select the input features intersecting StreamRiver polys: new selection
    # printMsg("Selecting features intersecting StreamRiver...")
    # lyrFeats = arcpy.SelectLayerByLocation_management (lyrFeats, "INTERSECT", lyrStreamRiver, "", "NEW_SELECTION", "NOT_INVERT")

    # # Select the features intersecting LakePond or Reservoir polys: add to existing selection
    # printMsg("Selecting features intersecting LakePond or Reservoir...")
    # lyrFeats = arcpy.SelectLayerByLocation_management (lyrFeats, "INTERSECT", lyrLakePond, "", "ADD_TO_SELECTION", "NOT_INVERT")

    # Calculate percentage of PF covered by widewater features
    printMsg("Calculating percentages of PFs covered by widewater features...")
    tabStreamRiver = scratchGDB + os.sep + "tabStreamRiver"
    SR = arcpy.TabulateIntersection_analysis(lyrFeats, fldID, lyrStreamRiver,
                                             tabStreamRiver)
    tabLakePond = scratchGDB + os.sep + "tabLakePond"
    LP = arcpy.TabulateIntersection_analysis(lyrFeats, fldID, lyrLakePond,
                                             tabLakePond)
    percTab = scratchGDB + os.sep + "percTab"
    arcpy.Merge_management([SR, LP], percTab)
    statsTab = scratchGDB + os.sep + "statsTab"
    arcpy.Statistics_analysis(percTab, statsTab, [["PERCENTAGE", "SUM"]],
                              fldID)
    arcpy.JoinField_management(lyrFeats, fldID, statsTab, fldID,
                               "SUM_PERCENTAGE")

    # Assign features to river (R) or stream (S) process
    codeblock = '''def procType(percent):
         if not percent:
            return "S"
         elif percent < 25:
            return "S"
         else:
            return "R"
         '''
    expression = "procType(!SUM_PERCENTAGE!)"
    arcpy.CalculateField_management(lyrFeats, "AlignType", expression,
                                    "PYTHON", codeblock)

    # # Assign selected features to river process
    # count = countSelectedFeatures(lyrFeats)
    # if count > 0:
    # printMsg("Assigning %s features to river (wide-water) process"%str(count))
    # arcpy.CalculateField_management (lyrFeats, "AlignType", "R", "PYTHON")
    # else:
    # pass

    # # Switch selection and assign to stream process
    # lyrFeats = arcpy.SelectLayerByAttribute_management (lyrFeats, "SWITCH_SELECTION")
    # count = countSelectedFeatures(lyrFeats)
    # if count > 0:
    # printMsg("Assigning %s features to stream process"%str(count))
    # arcpy.CalculateField_management (lyrFeats, "AlignType", "S", "PYTHON")
    # else:
    # pass

    # Save out features getting the river (wide-water) process
    printMsg("Saving out the features for river (wide-water) process")
    riverFeats = scratchGDB + os.sep + "riverFeats"
    # arcpy.CopyFeatures_management (lyrFeats, riverFeats)
    where_clause = '"AlignType" = \'R\''
    arcpy.Select_analysis(lyrFeats, riverFeats, where_clause)

    # Save out features getting the stream process
    printMsg("Switching selection and saving out the PFs for stream process")
    lyrFeats = arcpy.SelectLayerByAttribute_management(lyrFeats,
                                                       "SWITCH_SELECTION")
    streamFeats = scratchGDB + os.sep + "streamFeats"
    # arcpy.CopyFeatures_management (lyrFeats, streamFeats)
    where_clause = '"AlignType" = \'S\''
    arcpy.Select_analysis(lyrFeats, streamFeats, where_clause)

    ### Select the appropriate flowline features to be used for stream or river processes
    ## Stream process
    # Select catchments intersecting stream features
    printMsg("Selecting catchments intersecting stream features...")
    lyrCatch = arcpy.SelectLayerByLocation_management(lyrCatch, "INTERSECT",
                                                      streamFeats, "",
                                                      "NEW_SELECTION")

    # Clip flowlines to selected catchments
    printMsg("Clipping flowlines to selected catchments...")
    streamLines = scratchGDB + os.sep + "streamLines"
    arcpy.Clip_analysis(lyrFlowlines, lyrCatch, streamLines)

    ## River process
    # Select StreamRiver and LakePond polys intersecting input features
    printMsg("Selecting open water polygons intersecting input features...")
    lyrStreamRiver = arcpy.SelectLayerByLocation_management(
        lyrStreamRiver, "INTERSECT", riverFeats)
    lyrLakePond = arcpy.SelectLayerByLocation_management(
        lyrLakePond, "INTERSECT", riverFeats)

    # Merge selected polygons into single layer
    printMsg("Merging widewater features...")
    wideWater = scratchGDB + os.sep + "wideWater"
    arcpy.Merge_management([lyrStreamRiver, lyrLakePond], wideWater)

    # Select catchments intersecting river features
    printMsg("Selecting catchments intersecting river features...")
    lyrCatch = arcpy.SelectLayerByLocation_management(lyrCatch, "INTERSECT",
                                                      riverFeats, "",
                                                      "NEW_SELECTION")

    # Clip widewater to selected catchments
    printMsg("Clipping widewaters to selected catchments...")
    clipWideWater = scratchGDB + os.sep + "clipWideWater"
    arcpy.Clip_analysis(wideWater, lyrCatch, clipWideWater)

    # Clip flowlines to clipped widewater
    printMsg("Clipping flowlines to clipped widewater features...")
    riverLines = scratchGDB + os.sep + "riverLines"
    arcpy.Clip_analysis(lyrFlowlines, clipWideWater, riverLines)

    # Run alignment separately for stream and river features
    streamParms = [streamFeats, streamLines, "_stream"]
    riverParms = [riverFeats, riverLines, "_river"]
    for parms in [streamParms, riverParms]:
        inFeats = parms[0]
        inFlowlines = parms[1]
        nameTag = parms[2]

        # Get (pseudo-)centroid of features to be shifted
        centroids = scratchGDB + os.sep + "centroids%s" % nameTag
        arcpy.FeatureToPoint_management(inFeats, centroids, "INSIDE")

        # Get near table: distance from centroids to 3 nearest flowlines, including location info
        # Note: This output cannot be written to memory or it doesn't produce the location info, which is needed. Why, Arc, why???
        nearTab = arcpy.env.scratchGDB + os.sep + "nearTab%s" % nameTag
        arcpy.GenerateNearTable_analysis(centroids, inFlowlines, nearTab, "",
                                         "LOCATION", "ANGLE", "ALL", "3",
                                         "PLANAR")

        # Join centroid IDs to near table
        arcpy.JoinField_management(nearTab, "IN_FID", centroids, "OBJECTID",
                                   fldID)

        # Join StreamLevel from flowlines to near table
        arcpy.JoinField_management(nearTab, "NEAR_FID", inFlowlines,
                                   "OBJECTID", fldLevel)

        # Get summary statistics to determine lowest StreamLevel value for each centroid; attach to near table
        sumTab = scratchGDB + os.sep + "sumTab%s" % nameTag
        stats = "%s MIN" % fldLevel
        arcpy.Statistics_analysis(nearTab, sumTab, stats, "IN_FID")
        arcpy.JoinField_management(nearTab, "IN_FID", sumTab, "IN_FID", minFld)

        # Keep only records with lowest StreamLevel values
        where_clause = "StreamLeve = %s" % minFld
        arcpy.MakeTableView_management(nearTab, "nearTab_View", where_clause)

        # Get summary statistics to determine shortest distance among remaining records; attach to near table
        sumTab2 = scratchGDB + os.sep + "sumTab2%s" % nameTag
        arcpy.Statistics_analysis("nearTab_View", sumTab2, "NEAR_DIST MIN",
                                  "IN_FID")
        arcpy.JoinField_management(nearTab, "IN_FID", sumTab2, "IN_FID",
                                   "MIN_NEAR_DIST")

        # Get final record set
        where_clause = "StreamLeve = %s AND NEAR_DIST = MIN_NEAR_DIST" % minFld
        arcpy.MakeTableView_management(nearTab, "nearTab_View", where_clause)

        # Join from/to x,y fields from the filtered near table view to the input features
        arcpy.JoinField_management(inFeats, fldID, "nearTab_View", fldID,
                                   ["FROM_X", "FROM_Y", "NEAR_X", "NEAR_Y"])

        # Calculate shift in x/y directions
        arcpy.AddField_management(inFeats, "DIFF_X", "DOUBLE")
        arcpy.AddField_management(inFeats, "DIFF_Y", "DOUBLE")
        arcpy.CalculateField_management(inFeats, "DIFF_X",
                                        "!NEAR_X!- !FROM_X!", "PYTHON")
        arcpy.CalculateField_management(inFeats, "DIFF_Y",
                                        "!NEAR_Y!- !FROM_Y!", "PYTHON")

        # Calculate new position, and shift polygon
        # Note that (FROM_X, FROM_Y) is not necessarily the same as SHAPE@XY,
        # because the former is a pseudo-centroid forced to be contained by the
        # input feature. If the shape of the feature is strongly curved, the
        # true centroid may not be contained. I'm guessing (but am not 100%
        # sure) that SHAPE@XY is the true centroid. This is why I calculated
        # the shift rather than simply moving SHAPE@XY to (NEAR_X, NEAR_Y).
        with arcpy.da.UpdateCursor(inFeats,
                                   ["SHAPE@XY", "DIFF_X", "DIFF_Y"]) as cursor:
            for row in cursor:
                x_shift = row[1]
                y_shift = row[2]
                x_old = row[0][0]
                y_old = row[0][1]
                x_new = x_old + x_shift
                y_new = y_old + y_shift
                row[0] = (x_new, y_new)
                cursor.updateRow(row)

    # Merge output to a single feature class
    arcpy.Merge_management([streamFeats, riverFeats], outFeats)
    mergeLines = scratchGDB + os.sep + "mergeLines"
    arcpy.Merge_management([streamLines, riverLines], mergeLines)

    return (outFeats, clipWideWater, mergeLines)
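
A hypothetical invocation of the function above; every path and the ID field name are assumptions for illustration, not from the source:

# Hypothetical inputs, for illustration only
in_PFs = r"C:\Data\biotics.gdb\ProcFeats"
out_PFs = r"C:\Data\scratch.gdb\ProcFeats_shifted"
hydroNet = r"C:\Data\NHDPlus.gdb\Hydrography\HYDRO_NET"
catchments = r"C:\Data\NHDPlus.gdb\Catchment"
outFC, wideWater, flowLines = shiftAlignToFlow(in_PFs, out_PFs, "SFID",
                                               hydroNet, catchments)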
예제 #19
0
for fc in fclist:
    stpos = str(fc).find("_mineplant_layer")
    if stpos != -1:
        fc_temp = fc[:stpos]
        fc_fishnet = fc_temp + "_fishnet"
        fc_GFW = fc_temp + "_mineplant"  # replace with desired string extension
        try:
            arcpy.TabulateIntersection_analysis(fc_fishnet, zoneFld, fc_GFW, gdb + "\\" + fc_fishnet + "_mining", class_fields, sum_fields, xy_tol, out_units)
            arcpy.JoinField_management(fc_fishnet, "OBJECTID", fc_fishnet + "_mineplant", "OBJECTID_1", ["COMMODITY", "PNT_COUNT"])
            print "Joining %s to the %s_mineplant" % (fc_fishnet, fc_fishnet)
        except:
            print "intersection failed for %s and %s" % (fc_fishnet, fc_GFW)
            # convert 'geodesic area' features to raster
            in_features = fc_fishnet
            field = "DAM_COUNT"
            out_raster = str(fc_fishnet) + "_dams"
            cell_size = "10000"
            # Set Mask environment
            arcpy.env.mask = r"C:\Data\WWF\Processing\Global_Processing.gdb\landmask"
            arcpy.env.snapRaster = r"C:\Data\WWF\Processing\Global_Processing.gdb\landmask"
            # run tool
## EA Coordinate system. Now the selecting features are in the same projection as the wildfires, so a
## tabulate intersect can be performed to determine the amount of fire by ecoregion. This replicates the
## Ecoregion Fire Occurrence analysis completed in Fall 2019, but this method is faster.
##
    ## Ecoregion inputs = AK_Ecoregions, HI_Ecoregions, US_Ecoregions <---- Can use AK/HI/US_ECO_path variables
    ## for pathname
    ## Fire Occurrence inputs = F_O_AK_Prj, F_O_HI_Prj, F_O_US_Prj <---- All in Fire_Occ.gdb
    arcpy.env.workspace = "B:/GIS_Projects/SP_Wildfire/Reprocessing/Workspace/Fire_Occ.gdb"
    arcpy.env.parallelProcessingFactor = "100%"   
    zone_field = "ECO_CODE"
    sum_field = "FIRE_SIZE"
    F_O_output_eco = ["FO__AK_By_Eco", "FO__HI_By_Eco", "FO__US_By_Eco"]
    for fc in arcpy.ListFeatureClasses():
        if fc == "F_O_AK_Prj":
            print("Tabulating intersect with %s and %s with HECTARES as output units"%(fc, AK_ECO_path))
            arcpy.TabulateIntersection_analysis(AK_ECO_path, zone_field, fc, F_O_output_eco[0], "", sum_field, "", "HECTARES")
            print("Tabulate intersect completed.\n%s created"%(F_O_output_eco[0]))
        elif fc == "F_O_HI_Prj":
            print("Tabulating intersect with %s and %s with HECTARES as output units"%(fc, HI_ECO_path))
            arcpy.TabulateIntersection_analysis(HI_ECO_path, zone_field, fc, F_O_output_eco[1], "", sum_field, "", "HECTARES")
            print("Tabulate intersect completed.\n%s created"%(F_O_output_eco[1]))            
        elif fc == "F_O_US_Prj":
            print("Tabulating intersect with %s and %s with HECTARES as output units"%(fc, US_ECO_path))
            arcpy.TabulateIntersection_analysis(US_ECO_path, zone_field, fc, F_O_output_eco[2], "", sum_field, "", "HECTARES")
            print("Tabulate intersect completed.\n%s created"%(F_O_output_eco[2]))            
        else:
            print("%s is not a target"%(fc))
##
##-----------------Repeat the same geoprocessing workflow but for the 50 states-------------------
## USA has already been assigned to the feature class as a variable containing all of the 50 states and territories.
## Use a list to filter out the areas that are territories and not states.
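## A minimal sketch of that filter (the territory names and the NAME field are
## assumptions, not from the source):
    territories = ["Puerto Rico", "Guam", "American Samoa",
                   "Commonwealth of the Northern Mariana Islands",
                   "United States Virgin Islands"]
    where = "NAME NOT IN ({})".format(", ".join("'%s'" % t for t in territories))
    state_lyr = arcpy.MakeFeatureLayer_management(USA, "state_lyr", where)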
            print(
                "      Clipping out OS open GS not already covered by OSGS (i.e. leaving just rural areas)"
            )
            # First dissolve OSGS to get a simple area for clipping
            arcpy.Dissolve_management("OSGS", "OSGS_dissolve")
            arcpy.Erase_analysis("OS_Open_GS", "OSGS_dissolve",
                                 "OS_Open_GS_clip")
            print("      Deleting slivers")
            MyFunctions.delete_by_size("OS_Open_GS_clip", 20)
            MyFunctions.check_and_repair("OS_Open_GS_clip")

        print(
            "      Calculating percentage of base map features within OpenGS polygons"
        )
        arcpy.TabulateIntersection_analysis(
            Base_map, ["BaseID_GS", Hab_field, "Shape_Area"],
            "OS_Open_GS_clip", "GS_TI",
            ["GSID", "function", "distName1", "Shape_Area"])
        # Delete all the rows with overlap less than 50%. Doing this with a temporary selection does not seem to work,
        # as the wrong rows get joined.
        arcpy.MakeTableView_management("GS_TI", "TI_lyr")
        arcpy.SelectLayerByAttribute_management("TI_lyr",
                                                where_clause="PERCENTAGE < 50")
        arcpy.DeleteRows_management("TI_lyr")
        arcpy.Delete_management("TI_lyr")

        print("      Adding fields for open GS function and name")
        MyFunctions.check_and_add_field(Base_map, "OpenGS_func", "TEXT", 40)
        MyFunctions.check_and_add_field(Base_map, "OpenGS_name", "TEXT", 100)

        # Note: this does not seem to work, i.e. the table is sorted OK but then the wrong rows get copied over.
        # So I switched to the alternative approach of deleting all the rows we don't want as above.
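
The sort-then-delete workaround described above can be wrapped as a small reusable helper; a sketch (the helper name is mine; Tabulate Intersection always writes an AREA field, so sorting on it is safe):

def keep_largest_overlap(ti_table, zone_id_field, out_table):
    # Sort so the largest intersection per zone comes first, then delete the
    # later (smaller) rows that duplicate each zone ID, keeping only the first.
    arcpy.Sort_management(ti_table, out_table, [["AREA", "DESCENDING"]])
    arcpy.DeleteIdentical_management(out_table, [zone_id_field])
    return out_table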
예제 #22
0
print("Project complete")

print(
    "Creating geodatabase to hold the following table (throws an error if the table is not in a geodatabase)"
)
arcpy.CreateFileGDB_management(reedyGISDir, "table.gdb")
print("Geodatabase created")

print(
    "Use tabulate intersection to calculate area and percentage of impervious in each subbasin"
)
arcpy.TabulateIntersection_analysis(
    in_zone_features="initDelin\ReedySubs871_DissSPNC.shp",
    zone_fields="GRIDCODE",
    in_class_features="imperviousLayers\\reedyImpervious.shp",
    out_table=reedyGISDir + "\\table.gdb\\reedyICpct2",
    class_fields="",
    sum_fields="",
    xy_tolerance="-1 Unknown",
    out_units="UNKNOWN")
print("Tabulate intersection complete")

print(
    "Join the previous table to the subbasin table to match up areas and percentages (using GRIDCODE)"
)
arcpy.JoinField_management("initDelin\ReedySubs871_DissSPNC.shp", "GRIDCODE",
                           "table.gdb\\reedyICpct2", "GRIDCODE")
print("Table join complete")

print("Export the attribute table into a csv file to specified location")
arcpy.TableToTable_conversion("initDelin\ReedySubs871_DissSPNC.shp",
        print ("Adding CROME farmland interpretation to " + LAD)
        print ("  Adding habitat fields")
        MyFunctions.check_and_add_field(out_map, "CROME_farmland", "TEXT", 50)

        print("      Copying OBJECTID for base map")
        MyFunctions.check_and_add_field(out_map, "BaseID_CROME", "LONG", 0)
        arcpy.CalculateField_management(out_map, "BaseID_CROME", "!OBJECTID!", "PYTHON_9.3")

        print ("  Identifying farmland")
        arcpy.MakeFeatureLayer_management(out_map, "ag_lyr")
        expression = "Interpreted_hab IN ('Agricultural land', 'Natural surface') OR Interpreted_hab LIKE 'Arable%'"
        expression = expression + " OR Interpreted_hab LIKE 'Improved grassland%'"
        arcpy.SelectLayerByAttribute_management("ag_lyr", where_clause=expression)

        print("      Calculating percentage of farmland features within CROME polygons")
        arcpy.TabulateIntersection_analysis(CROME_data, ["LUCODE", "Land_Use_Description", "field", "Shape_Area"],
                                            "ag_lyr", "CROME_TI", ["BaseID_CROME", Hab_field, "Shape_Area"])

        print("      Sorting TI table by size so that larger intersections are first in the list")
        arcpy.Sort_management("CROME_TI", "CROME_TI_sort", [["AREA", "ASCENDING"]])

        print ("      Adding fields for CROME data")
        MyFunctions.check_and_add_field(out_map, "CROME_desc", "TEXT", 50)
        MyFunctions.check_and_add_field(out_map, "CROME_simple", "TEXT", 30)

        print ("      Joining CROME info for base map polygons that are >50% inside CROME polygons")
        arcpy.AddJoin_management("ag_lyr", "BaseID_CROME", "CROME_TI_sort", "BaseID_CROME", "KEEP_ALL")

        print("      Copying CROME data")
        arcpy.SelectLayerByAttribute_management("ag_lyr", where_clause="CROME_TI_sort.PERCENTAGE > 50")
        arcpy.CalculateField_management("ag_lyr", out_map + ".CROME_desc", "!CROME_TI_sort.Land Use Description!", "PYTHON_9.3")
        arcpy.CalculateField_management("ag_lyr", out_map + ".CROME_simple", "!CROME_TI_sort.field!", "PYTHON_9.3")
예제 #24
0
            print("      Saving base map Index IDs and areas")
            MyFunctions.check_and_add_field(Base_map, base_ID, "LONG", 0)
            arcpy.CalculateField_management(Base_map, base_ID,
                                            "!" + BaseIndexID + "!",
                                            "PYTHON_9.3")

            MyFunctions.check_and_add_field(Base_map, base_area, "FLOAT", 0)
            arcpy.CalculateField_management(Base_map, base_area,
                                            '!Shape_Area!', "PYTHON_9.3")

            print(
                "      Calculating percentage areas of new features within each base map polygon"
            )
            arcpy.TabulateIntersection_analysis(
                Base_map, [base_ID, base_key, base_TI_fields, base_area],
                "New_snap_clean",
                "Base_TI", [new_ID, new_key, new_TI_fields, new_area],
                xy_tolerance=xy_tol)

            # Decide which polygons to split, based on the percentage of overlap and the absolute area of the overlap
            # Also decide whether the polygon should be interpreted as new feature or base map attributes, in cases where they conflict.
            print(
                "      Interpreting overlaps and deciding which polygons to split"
            )

            # Add Relationship field to identify which polygons to split, which to add new feature information to, and which to retain unchanged
            MyFunctions.check_and_add_field("Base_TI", Relationship_field,
                                            "TEXT", 0)

            codeblock = """
def relationship(overlap_area, percent_overlap, ignore_low, ignore_high, significant_size):
예제 #25
0
    if __name__ == '__main__':
        print "Ejecutando Tabulate Intersection a 64bits ...."
        print infea, zone_fields, in_class_features, out_table, class_fields, sum_fields, xy_tolerance, out_units
        recibe_environ(variables, entorno)
        ruta_txt_fids1 = cambia_caracteres(ruta_txt_fids1)
        ruta_txt_fids2 = cambia_caracteres(ruta_txt_fids2)
        lista_fids1 = leer_text(ruta_txt_fids1.decode('utf-8'))
        lista_fids2 = leer_text(ruta_txt_fids2.decode('utf-8'))
        infea = cambia_caracteres(infea)
        out_table = cambia_caracteres(out_table)
        in_class_features = cambia_caracteres(in_class_features)
        layer_infea = recibir_seleccion(lista_fids1, infea)
        layer_in_class_features = recibir_seleccion(lista_fids2,
                                                    in_class_features)
        print layer_infea, zone_fields, layer_in_class_features, out_table, class_fields, sum_fields, xy_tolerance, out_units
        in_class_features = cambia_caracteres(in_class_features)
        arcpy.TabulateIntersection_analysis(
            in_zone_features=layer_infea,
            zone_fields=zone_fields,
            in_class_features=layer_in_class_features,
            out_table=out_table,
            class_fields=class_fields,
            sum_fields=sum_fields,
            xy_tolerance=xy_tolerance,
            out_units=out_units)

except exceptions.Exception as e:
    print e.__class__, e.__doc__, e.message
    os.system("pause")
예제 #26
0
    if not fcs:
        arcpy.FeatureClassToGeodatabase_conversion(fish, new_Workspace)
        print "copying %s to %s" % (fish, new_Workspace)
    elif len([x for x in fcs if "fishnet" in x]) > 0:
        print "skipping copy of fishnet to GDB : %s" % gdb
    # tabulate intersection in the GDB
    in_string = fish[25:]
    in_string_ = in_string.replace("_fishnet", "")
    in_string__ = in_string_.replace("_", " ")
    in_features = os.path.join(inpath, in_string__)
    out_table = gdb
    if arcpy.Exists(gdb):
        print "intersection already performed"
    else:
        arcpy.TabulateIntersection_analysis(fish, zone_fields, in_features, out_table, class_fields, sum_fields, xy_tolerance, out_units)
        print "calculating area of biome per grid cell : %s" % out_table
        # Change field precision of the area and percentage fields by creating new
        # fields, copying values from the original fields, and deleting the originals
        arcpy.AddField_management(out_table, "New_Area", "FLOAT")
        print "adding new field to store biome area"
        arcpy.AddField_management(out_table, "New_Perc", "FLOAT")
        print "adding new field to store biome percentage cover"
        with arcpy.da.UpdateCursor(out_table, fields1) as cursor:
            for row in cursor:
                row[1] = row[0]
                cursor.updateRow(row)
        with arcpy.da.UpdateCursor(out_table, fields2) as cursor:
            for row in cursor:
                row[1] = row[0]
                cursor.updateRow(row)
        arcpy.DeleteField_management(out_table, ['PERCENTAGE', 'AREA'])
with arcpy.da.SearchCursor(counties, ['COUNTY_NAM']) as cursor:
    county_list = sorted({row[0] for row in cursor})

pu_lyr = arcpy.MakeFeatureLayer_management(target_features, "pu_lyr")
county_lyr = arcpy.MakeFeatureLayer_management(counties, "county_lyr")
merge_list = []

for county in county_list:
    print "working on " + county + " at " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    arcpy.SelectLayerByAttribute_management(county_lyr, "NEW_SELECTION", '"COUNTY_NAM" = ' + "'%s'"%county)
    arcpy.SelectLayerByLocation_management(pu_lyr, "INTERSECT", county_lyr, "", "NEW_SELECTION")

    # Calculate intersection between Target Feature and Join Features and produces output table
    intersect = os.path.join(outGDB, county)
    classFeatures = ["ELSeason", "OccProb"]
    arcpy.TabulateIntersection_analysis(pu_lyr, "unique_id", join_features, intersect, classFeatures)
    count = arcpy.GetCount_management(intersect)
    print('{} County has {} records'.format(county, count[0]))
    merge_list.append(intersect)

merge = arcpy.Merge_management(merge_list, os.path.join(outGDB, "SGCNxPU_occurrence"))

# delete PUs that have less than 10% (4046.86 square meters) of area overlapped by particular species
# could change this threshold if needed
with arcpy.da.UpdateCursor(merge, "PERCENTAGE") as cursor:
    for row in cursor:
        if row[0] <= 10:
            cursor.deleteRow()
예제 #28
0
파일: table.py 프로젝트: Baohong/ATtILA2
def createPolygonValueCountTable(inPolygonFeature, inPolygonIdField,
                                 inValueDataset, inValueField, outTable,
                                 metricConst, index, cleanupList):
    """Transfer a value count from an specified geodataset to input polygon features, using simple areal weighting.
    
    **Description:**

        This function uses Tabulate Intersection to construct a table with a field containing the area weighted
        value count (e.g., POPULATION) for each input polygon unit. The value field is renamed from the metric 
        constants entry.
        
        Returns the created output table and the generated output value count field name.
        
    **Arguments:**

        * *inPolygonFeature* - input Polygon feature class with full path.
        * *inPolygonIdField* - the name of the field in the reporting unit feature class containing a unique identifier
        * *inValueDataset* - input value feature class or raster with full path
        * *inValueField* - the name of the field in the value feature class containing count values. Will be empty if 
                           the inValueDataset is a raster
        * *outTable* -  the output table that will contain calculated population values
        * *metricConst* - an ATtILA2 object containing constant values to match documentation
        * *index* - if this function is going to be run multiple times, this index is used to keep track of intermediate
                    outputs and field names.
        * *cleanupList* - object containing commands and parameters to perform at cleanup time.
        
    **Returns:**

        * table (type unknown - string representation?)
        * string - the generated output value count field name
        
    """
    from arcpy import env
    from .. import errors
    from . import files
    tempEnvironment0 = env.snapRaster
    tempEnvironment1 = env.cellSize

    try:
        desc = arcpy.Describe(inValueDataset)

        if desc.datasetType == "RasterDataset":
            # set the raster environments so the raster operations align with the census grid cell boundaries
            env.snapRaster = inValueDataset
            env.cellSize = desc.meanCellWidth

            # calculate the population for the polygon features using zonal statistics as table
            arcpy.sa.ZonalStatisticsAsTable(inPolygonFeature, inPolygonIdField,
                                            inValueDataset, outTable, "DATA",
                                            "SUM")

            # Rename the population count field.
            outValueField = metricConst.valueCountFieldNames[index]
            arcpy.AlterField_management(outTable, "SUM", outValueField,
                                        outValueField)

        else:  # census features are polygons
            # Create a copy of the census feature class that we can add new fields to for calculations.
            fieldMappings = arcpy.FieldMappings()
            fieldMappings.addTable(inValueDataset)
            [
                fieldMappings.removeFieldMap(
                    fieldMappings.findFieldMapIndex(aFld.name))
                for aFld in fieldMappings.fields if aFld.name != inValueField
            ]
            tempName = "%s_%s" % (metricConst.shortName, desc.baseName)
            tempCensusFeature = files.nameIntermediateFile(
                [tempName + "_Work", "FeatureClass"], cleanupList)
            inValueDataset = arcpy.FeatureClassToFeatureClass_conversion(
                inValueDataset, env.workspace,
                os.path.basename(tempCensusFeature), "", fieldMappings)

            # Add a dummy field to the copied census feature class and calculate it to a value of 1.
            classField = "tmpClass"
            arcpy.AddField_management(inValueDataset, classField, "SHORT")
            arcpy.CalculateField_management(inValueDataset, classField, 1)

            # Construct a table with a field containing the area weighted value count for each input polygon unit
            arcpy.TabulateIntersection_analysis(inPolygonFeature,
                                                [inPolygonIdField],
                                                inValueDataset, outTable,
                                                [classField], [inValueField])

            # Rename the population count field.
            outValueField = metricConst.valueCountFieldNames[index]
            arcpy.AlterField_management(outTable, inValueField, outValueField,
                                        outValueField)

        return outTable, outValueField

    except Exception as e:
        errors.standardErrorHandling(e)

    finally:
        env.snapRaster = tempEnvironment0
        env.cellSize = tempEnvironment1
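
The areal-weighting trick at the heart of this function also works standalone; a minimal sketch, with every path and field name a hypothetical assumption:

import arcpy

blocks = r"C:\Data\census.gdb\blocks"       # value polygons with a POP count field
units = r"C:\Data\census.gdb\watersheds"    # reporting units with a HUC12 ID field
out_tab = r"C:\Data\census.gdb\popByUnit"

# A constant class field makes Tabulate Intersection apportion the sum
# field (POP) to each unit by the fraction of polygon area overlapped.
arcpy.AddField_management(blocks, "tmpClass", "SHORT")
arcpy.CalculateField_management(blocks, "tmpClass", 1)
arcpy.TabulateIntersection_analysis(units, ["HUC12"], blocks, out_tab,
                                    ["tmpClass"], ["POP"])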
예제 #29
0
# delete cursor and row objects to remove locks on data
del row
del cursor

print "Finished calculating width"

# clip roads to ws boundaries

clipped_roads = r"H:\RoadCalculations\Roads.gdb\Clipped_Roads2"
arcpy.Clip_analysis(in_streets, WS, clipped_roads)
print "END OF CLIP"

# Create new field for clipped_roads for road area

arcpy.AddField_management(clipped_roads, "Road_Area","DOUBLE", 10,5, "","Road_Area")

# Calculate field

expression = "!SHAPE.LENGTH! * !Rd_Width!"

arcpy.CalculateField_management(clipped_roads, "Road_Area", expression, "PYTHON")

print "END OF CALCULATE FIELD"
# Tabulate Intersection of WS and the clipped roads feature.
# Interested in sum of road area

outputtable = r"H:\RoadCalculations\Roads.gdb\RoadArea_byWS2"
arcpy.TabulateIntersection_analysis(WS, "GRIDCODE", clipped_roads, outputtable,"","Road_Area")

print "END OF SCRIpT"
예제 #30
0
def AttributeEOs(in_ProcFeats, in_eoReps, in_sppExcl, in_eoSelOrder,
                 in_consLands, in_consLands_flat, out_procEOs, out_sumTab):
    '''Scores EOs based on a number of factors.
   Inputs:
   in_ProcFeats: Input feature class with "site-worthy" procedural features
   in_eoReps: Input feature class or table with EO reps, e.g., EO_Reps_All.shp
   in_sppExcl: Input table containing the list of elements to be excluded from the process, e.g., EO_Exclusions.dbf
   in_eoSelOrder: Input table designating selection order for different EO rank codes, e.g., EORANKNUM.dbf
   in_consLands: Input feature class with conservation lands (managed areas), e.g., MAs.shp
   in_consLands_flat: Flattened (overlap-free) version of in_consLands, used for the BMI tabulations
   out_procEOs: Output EOs with TIER scores
   out_sumTab: Output table summarizing number of included EOs per element'''

    # Dissolve procedural features on EO_ID
    printMsg("Dissolving procedural features by EO...")
    arcpy.Dissolve_management(in_ProcFeats, out_procEOs,
                              ["SF_EOID", "ELCODE", "SNAME"],
                              [["SFID", "COUNT"]], "MULTI_PART")

    # Make EO_ID into string to match EO reps - FFS why do I have to do this??
    arcpy.AddField_management(out_procEOs, "EO_ID", "TEXT", "", "", 20)
    arcpy.CalculateField_management(out_procEOs, "EO_ID", "!SF_EOID!",
                                    "PYTHON")

    # Join some fields
    printMsg("Joining fields from EO reps...")
    arcpy.JoinField_management(out_procEOs, "EO_ID", in_eoReps, "EO_ID",
                               ["EORANK", "RND_GRANK", "LASTOBS"])
    arcpy.JoinField_management(out_procEOs, "EORANK", in_eoSelOrder, "EORANK",
                               "SEL_ORDER")

    # Add and calculate some fields
    # Field: NEW_GRANK
    printMsg("Calculating NEW_GRANK field...")
    arcpy.AddField_management(out_procEOs, "NEW_GRANK", "TEXT", "", "", 2)
    codeblock = '''def reclass(granks):
      if (granks == "T1"):
         return "G1"
      elif granks == "T2":
         return "G2"
      elif granks == "T3":
         return "G3"
      elif granks == "T4":
         return "G4"
      elif granks in ("T5","GH","GNA","GNR","GU","TNR","TX","") or granks == None:
         return "G5"
      else:
         return granks'''
    expression = "reclass(!RND_GRANK!)"
    arcpy.CalculateField_management(out_procEOs, "NEW_GRANK", expression,
                                    "PYTHON_9.3", codeblock)

    # Field: EXCLUSION
    arcpy.AddField_management(out_procEOs, "EXCLUSION", "TEXT", "", "",
                              20)  # This will be calculated below by groups

    # Set EXCLUSION value for low EO ranks
    codeblock = '''def reclass(order):
      if order == 0:
         return "Low EO Rank"
      else:
         return "Keep"'''
    expression = "reclass(!SEL_ORDER!)"
    arcpy.CalculateField_management(out_procEOs, "EXCLUSION", expression,
                                    "PYTHON_9.3", codeblock)

    # Set EXCLUSION value for species exclusions
    printMsg("Excluding certain species...")
    arcpy.MakeFeatureLayer_management(out_procEOs, "lyr_EO")
    arcpy.AddJoin_management("lyr_EO", "ELCODE", in_sppExcl, "ELCODE",
                             "KEEP_COMMON")
    arcpy.CalculateField_management("lyr_EO", "EXCLUSION",
                                    "'Species Exclusion'", "PYTHON")

    # Tabulate intersection of EOs with military land where BMI > '2'
    printMsg("Tabulating intersection of EOs with military lands...")
    where_clause = '"MATYPE" IN (\'Military Installation\', \'Military Recreation Area\', \'NASA Facility\', \'sold - Military Installation\', \'surplus - Military Installation\') AND "BMI" > \'2\''
    arcpy.MakeFeatureLayer_management(in_consLands, "lyr_Military",
                                      where_clause)
    TabInter_mil = scratchGDB + os.sep + "TabInter_mil"
    arcpy.TabulateIntersection_analysis(out_procEOs, "EO_ID", "lyr_Military",
                                        TabInter_mil)

    # Field: PERCENT_MIL
    arcpy.AddField_management(TabInter_mil, "PERCENT_MIL", "DOUBLE")
    arcpy.CalculateField_management(TabInter_mil, "PERCENT_MIL",
                                    "!PERCENTAGE!", "PYTHON")
    arcpy.JoinField_management(out_procEOs, "EO_ID", TabInter_mil, "EO_ID",
                               "PERCENT_MIL")

    # Set EXCLUSION value for Military exclusions
    where_clause = '"EXCLUSION" = \'Keep\' and "PERCENT_MIL" > 25'
    arcpy.MakeFeatureLayer_management(out_procEOs, "lyr_EO", where_clause)
    arcpy.CalculateField_management("lyr_EO", "EXCLUSION",
                                    "'Military Exclusion'", "PYTHON")

    # Tabulate Intersection of EOs with conservation lands where BMI = 1
    printMsg("Tabulating intersection of EOs with BMI-1 lands...")
    where_clause = '"BMI" = \'1\''
    arcpy.MakeFeatureLayer_management(in_consLands_flat, "lyr_bmi1",
                                      where_clause)
    TabInter_bmi1 = scratchGDB + os.sep + "TabInter_bmi1"
    arcpy.TabulateIntersection_analysis(out_procEOs, "EO_ID", "lyr_bmi1",
                                        TabInter_bmi1)

    # Field: PERCENT_bmi1
    arcpy.AddField_management(TabInter_bmi1, "PERCENT_bmi1", "DOUBLE")
    arcpy.CalculateField_management(TabInter_bmi1, "PERCENT_bmi1",
                                    "!PERCENTAGE!", "PYTHON")
    arcpy.JoinField_management(out_procEOs, "EO_ID", TabInter_bmi1, "EO_ID",
                               "PERCENT_bmi1")

    # Tabulate Intersection of EOs with conservation lands where BMI = 2
    printMsg("Tabulating intersection of EOs with BMI-2 lands...")
    where_clause = '"BMI" = \'2\''
    arcpy.MakeFeatureLayer_management(in_consLands_flat, "lyr_bmi2",
                                      where_clause)
    TabInter_bmi2 = scratchGDB + os.sep + "TabInter_bmi2"
    arcpy.TabulateIntersection_analysis(out_procEOs, "EO_ID", "lyr_bmi2",
                                        TabInter_bmi2)

    # Field: PERCENT_bmi2
    arcpy.AddField_management(TabInter_bmi2, "PERCENT_bmi2", "DOUBLE")
    arcpy.CalculateField_management(TabInter_bmi2, "PERCENT_bmi2",
                                    "!PERCENTAGE!", "PYTHON")
    arcpy.JoinField_management(out_procEOs, "EO_ID", TabInter_bmi2, "EO_ID",
                               "PERCENT_bmi2")

    printMsg("Calculating additional fields...")
    # Field: BMI_score
    arcpy.AddField_management(out_procEOs, "BMI_score", "DOUBLE")
    codeblock = '''def score(bmi1, bmi2):
      if not bmi1:
         bmi1 = 0
      if not bmi2:
         bmi2 = 0
      score = (2*bmi1 + bmi2)/2
      return score'''
    expression = 'score( !PERCENT_bmi1!, !PERCENT_bmi2!)'
    arcpy.CalculateField_management(out_procEOs, "BMI_score", expression,
                                    "PYTHON_9.3", codeblock)

    # Field: ysnNAP
    arcpy.AddField_management(out_procEOs, "ysnNAP", "SHORT")
    arcpy.MakeFeatureLayer_management(out_procEOs, "lyr_EO")
    where_clause = '"MATYPE" = \'State Natural Area Preserve\''
    arcpy.MakeFeatureLayer_management(in_consLands, "lyr_NAP", where_clause)
    arcpy.SelectLayerByLocation_management("lyr_EO", "INTERSECT", "lyr_NAP",
                                           "", "NEW_SELECTION", "NOT_INVERT")
    arcpy.CalculateField_management("lyr_EO", "ysnNAP", 1, "PYTHON")
    #arcpy.SelectLayerByAttribute_management("lyr_EO", "CLEAR_SELECTION")

    # # Field: NEAR_DIST
    # where_clause = '"BMI" in (\'1\',\'2\')'
    # arcpy.MakeFeatureLayer_management (in_consLands, "lyr_ConsLands", where_clause)
    # arcpy.Near_analysis(out_procEOs, "lyr_ConsLands", "", "NO_LOCATION", "NO_ANGLE", "PLANAR")

    # # Field: INV_DIST
    # arcpy.AddField_management(out_procEOs, "INV_DIST", "DOUBLE")
    # expression = "1/math.sqrt(!NEAR_DIST! + 1)"
    # arcpy.CalculateField_management(out_procEOs, "INV_DIST", expression , "PYTHON_9.3")

    # Get subset of EOs to summarize based on EXCLUSION field
    where_clause = '"EXCLUSION" = \'Keep\''
    arcpy.MakeFeatureLayer_management(out_procEOs, "lyr_EO", where_clause)

    # Summarize to get count of EOs per element
    printMsg("Summarizing...")
    arcpy.Statistics_analysis("lyr_EO", out_sumTab, [["EO_ID", "COUNT"]],
                              "ELCODE")
    arcpy.JoinField_management("lyr_EO", "ELCODE", out_sumTab, "ELCODE",
                               "COUNT_EO_ID")

    printMsg("EO attribution complete")
    return (out_procEOs, out_sumTab)
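
A hypothetical invocation; every path is an assumption (the file names echo the docstring examples):

procEOs, sumTab = AttributeEOs(r"C:\Data\biotics.gdb\ProcFeats",
                               r"C:\Data\EO_Reps_All.shp",
                               r"C:\Data\EO_Exclusions.dbf",
                               r"C:\Data\EORANKNUM.dbf",
                               r"C:\Data\MAs.shp",
                               r"C:\Data\MAs_flat.shp",
                               r"C:\Data\scratch.gdb\procEOs",
                               r"C:\Data\scratch.gdb\elementSumTab")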