Example No. 1
                                             "%ScratchWorkspace%\\FracTEMP",
                                             Counter)

ncurrentstep += 1
arcpy.AddMessage("Deleting residual segments - Step " + str(ncurrentstep) +
                 "/" + str(nstep))
FracTEMPToPoints = arcpy.FeatureVerticesToPoints_management(
    FracTEMP, "%ScratchWorkspace%\\FracTEMPToPoints", "BOTH_ENDS")

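# Tag each endpoint with its XY coordinates, then drop coincident duplicates
# so every endpoint location is kept only once.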
arcpy.AddXY_management(FracTEMPToPoints)
arcpy.DeleteIdentical_management(FracTEMPToPoints, ["POINT_X", "POINT_Y"])

arcpy.AddField_management(FracTEMP, "Fusion", "LONG", "", "", "", "",
                          "NULLABLE", "NON_REQUIRED", "")
fieldnames = [f.name for f in arcpy.ListFields(FracTEMP)]
arcpy.CalculateField_management(FracTEMP, "Fusion", "[" + fieldnames[0] + "]",
                                "VB", "")

SpatialRef = arcpy.Describe(Polygon).spatialReference
XY = arcpy.MakeXYEventLayer_management(NearTable, "NEAR_X", "NEAR_Y",
                                       "%ScratchWorkspace%\\XY", SpatialRef,
                                       "")

fieldnames = [f.name for f in arcpy.ListFields(FracTEMPToPoints)]
NearTable2 = arcpy.GenerateNearTable_analysis(XY, FracTEMPToPoints,
                                              "NearTable2", "", "LOCATION",
                                              "NO_ANGLE", "CLOSEST", "")
arcpy.JoinField_management(FracTEMPToPoints, fieldnames[0], NearTable2,
                           "NEAR_FID", ["NEAR_FID"])

MakeFracTEMPToPoints = arcpy.MakeFeatureLayer_management(
    FracTEMPToPoints, "%ScratchWorkspace%\\MakeFracTEMPToPoints", "", "",
Example No. 2
	nome = nome.replace(' Xvii',' XVII')
	nome = nome.replace(' Xvi',' XVI')
	nome = nome.replace(' Xx',' XX')
	# siglas (acronyms)
	nome = nome.replace('Apa ','APA ')
	nome = nome.replace('Cr ','CR ')
	nome = nome.replace('Floe ','FLOE ')
	nome = nome.replace('Flona ','FLONA ')
	nome = nome.replace('Pa ','PA ')
	nome = nome.replace('Pad ','PAD ')
	nome = nome.replace('Pae ','PAE ')
	nome = nome.replace('Par ','PAR ')
	nome = nome.replace('Pc ','PC ')
	nome = nome.replace('Pca ','PCA ')
	nome = nome.replace('Pct ','PCT ')
	nome = nome.replace('Pds ','PDS ')
	nome = nome.replace('Pe ','PE ')
	nome = nome.replace('Pic ','PIC ')
	nome = nome.replace('Prb ','PRB ')
	nome = nome.replace('Rds ','RDS ')
	nome = nome.replace('Resex ','RESEX ')
	nome = nome.replace('Rppn ','RPPN ')
	
	return nome
	
	"""

arcpy.CalculateField_management(shp, nome, expressao, 'PYTHON_9.3', codeblock)
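
The long replace chain above is easy to mistype; a table-driven sketch of the
same acronym fix-up (the SIGLAS list is copied from the chain, the structure
itself is an assumption, not the source) could look like:

# Hypothetical rewrite of the acronym fix-up above, driven by a list.
SIGLAS = ['APA', 'CR', 'FLOE', 'FLONA', 'PA', 'PAD', 'PAE', 'PAR', 'PC',
          'PCA', 'PCT', 'PDS', 'PE', 'PIC', 'PRB', 'RDS', 'RESEX', 'RPPN']

def fix_siglas(nome):
    # 'APA'.title() == 'Apa', so this reproduces each replace() call above.
    for sigla in SIGLAS:
        nome = nome.replace(sigla.title() + ' ', sigla + ' ')
    return nome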
Example No. 3
import arcpy
import os

arcpy.env.overwriteOutput = True

fcparcel = arcpy.GetParameterAsText(0)
areafield = arcpy.GetParameterAsText(1)
fcpubroad = arcpy.GetParameterAsText(2)
addfieldname = arcpy.GetParameterAsText(3)

# Drop any stale copy of the field before re-adding it (DeleteField errors
# if the field is absent, so check first).
if "publength" in [f.name for f in arcpy.ListFields(fcpubroad)]:
    arcpy.DeleteField_management(fcpubroad, "publength")
arcpy.AddField_management(fcpubroad, "publength", "FLOAT")

fieldexpression = "!shape.length!"
arcpy.CalculateField_management(fcpubroad, "publength", fieldexpression,
                                "PYTHON_9.3")


def spatialjoinsum(intar, injoin, infieldname, outfieldname, jointype):
    tempData = arcpy.env.scratchGDB + os.path.sep + "output"
    targetFeatures = intar
    joinFeatures = injoin

    # Create a new fieldmappings and add the two input feature classes.
    fieldmappings = arcpy.FieldMappings()
    fieldmappings.addTable(targetFeatures)
    fieldmappings.addTable(joinFeatures)

    FieldIndex = fieldmappings.findFieldMapIndex(infieldname)
    fieldmap = fieldmappings.getFieldMap(FieldIndex)
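
The snippet breaks off right after fetching the field map. Given the function's
name, a plausible continuation (the Sum merge rule and the join call are
assumptions, not the source) would be:

    # Hypothetical continuation: sum the joined field, then run the join.
    fieldmap.mergeRule = 'Sum'
    fieldmappings.replaceFieldMap(FieldIndex, fieldmap)
    arcpy.SpatialJoin_analysis(targetFeatures, joinFeatures, tempData,
                               jointype, "KEEP_ALL", fieldmappings)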
Example No. 4
    intslope, 'pente_poly', "NO_SIMPLIFY", "VALUE"
)  # raster-to-polygon conversion accepts only integer rasters

# Add new fields to the created polygons and copy the values into them.
arcpy.AddField_management("soil_poly.shp", "soil_type", "SHORT", "", "", "",
                          "", "", "", "")
arcpy.AddField_management("LU_poly.shp", "LU_type", "SHORT", "", "", "", "",
                          "", "", "")
arcpy.AddField_management("pente_poly.shp", "slope", "FLOAT", "", "", 32, "",
                          "", "", "")

arcpy.Delete_management("times.tif")
arcpy.Delete_management("slope_in_deg.tif")

# Copy the gridcode values into the new fields, then drop gridcode
arcpy.CalculateField_management("soil_poly.shp", "soil_type", "!gridcode!",
                                "PYTHON_9.3")
arcpy.DeleteField_management("soil_poly.shp", "gridcode")
arcpy.CalculateField_management("LU_poly.shp", "LU_type", "!gridcode!",
                                "PYTHON_9.3")
arcpy.DeleteField_management("LU_poly.shp", "gridcode")
arcpy.CalculateField_management("pente_poly.shp", "slope", "!gridcode!/100.0",
                                "PYTHON_9.3")
arcpy.DeleteField_management("pente_poly.shp", "gridcode")
# Overlay soil type, land use, and slope to create the HRU map
arcpy.Intersect_analysis(["soil_poly.shp", "LU_poly.shp"], "HRU1", "ALL")
arcpy.Intersect_analysis(["HRU1.shp", "pente_poly.shp"], "HRU2", "ALL")

# Delete HRU1 and the intermediate layers; dropping the unnecessary fields
# from the HRU2 attribute table is cut off here (see the sketch below)
arcpy.Delete_management("HRU1.shp")
arcpy.Delete_management("soil_poly.shp")
arcpy.Delete_management("pente_poly.shp")
Example No. 5
File: PTTW.py  Project: yhyuan/PTTW
    + INPUT_GDB +
    "\\PTTW_no_UTM,ID,-1,-1;MUN_CA_AUT \"MUN_CA_AUT\" true true false 50 Text 0 0 ,First,#,"
    + INPUT_GDB +
    "\\PTTW_no_UTM,MUN_CA_AUT,-1,-1;REFERENCE \"REFERENCE\" true true false 50 Text 0 0 ,First,#,"
    + INPUT_GDB +
    "\\PTTW_no_UTM,REFERENCE,-1,-1;UNIQUE \"UNIQUE\" true true false 254 Text 0 0 ,First,#,"
    + INPUT_GDB +
    "\\PTTW_no_UTM,UNIQUE,-1,-1;fix \"fix\" true true false 2 Short 0 0 ,First,#,"
    + INPUT_GDB + "\\PTTW_no_UTM,fix,-1,-1", "")

# Process PTTW_no_UTM: add LATITUDE and LONGITUDE fields initialized to 0, then expose the table as an XY event layer named PTTW_NO_UTM_Layer.
arcpy.AddField_management(OUTPUT_GDB + "\\PTTW_no_UTM", "LATITUDE", "DOUBLE",
                          "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.AddField_management(OUTPUT_GDB + "\\PTTW_no_UTM", "LONGITUDE", "DOUBLE",
                          "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.CalculateField_management(OUTPUT_GDB + "\\PTTW_no_UTM", "LATITUDE", "0",
                                "VB", "")
arcpy.CalculateField_management(OUTPUT_GDB + "\\PTTW_no_UTM", "LONGITUDE", "0",
                                "VB", "")
arcpy.MakeXYEventLayer_management(
    OUTPUT_GDB + "\\PTTW_no_UTM", "longitude", "latitude",
    OUTPUT_GDB + "\\PTTW_NO_UTM_Layer",
    "GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]];-400 -400 1000000000;0 1;0 1;8.98315284119521E-09;0.001;0.001;IsHighPrecision"
)
PTTW_no_UTM = OUTPUT_GDB + "\\PTTW_no_UTM"
arcpy.FeatureClassToFeatureClass_conversion(
    OUTPUT_GDB + "\\PTTW_NO_UTM_Layer", OUTPUT_GDB, "PTTW_NO_UTM_Layer", "",
    "PERMITNO 'PERMITNO' true true false 50 Text 0 0 ,First,#," + PTTW_no_UTM +
    ",PERMITNO,-1,-1;FILENO 'FILENO' true true false 200 Text 0 0 ,First,#," +
    PTTW_no_UTM +
    ",FILENO,-1,-1;CLIENTNAME 'CLIENTNAME' true true false 200 Text 0 0 ,First,#,"
    + PTTW_no_UTM +
Example No. 6
        if ext in os.path.splitext(workspace)
]:
    workspace = workspace
else:
    workspace = os.path.dirname(workspace)
arcpy.env.workspace = workspace
# Create a Feature Dataset for the per-section (vak) feature classes
oDS = workspace + "/" + DS
if not arcpy.Exists(oDS):
    arcpy.CreateFeatureDataset_management(workspace, DS)
    arcpy.AddMessage("Uitvoer Dataset:  " + DS + " aangemaakt!")
else:
    arcpy.AddMessage("Bestaande uitvoer Dataset:  " + DS + " gebruiken!")
#---------------------------------------------------------
# Copy the section ID (Vakid) over to the talud ID (Taludid)
arcpy.CalculateField_management(VakFC, TALID, "[" + IDkol + "]", "VB")
#---------------------------------------------------------
# Link one section name to the cross-profile points.
# First select only the points we want to include: see the GebruikVoorDWP column; if it is not 1, the point is excluded.
arcpy.Select_analysis(in_features=P10FC,
                      out_feature_class="xxTALUDPnt",
                      where_clause=Conkol + " = 1")
# Create a new fieldmappings and add the two input feature classes.
fieldmap = arcpy.FieldMappings()
fieldmap.addTable("xxTALUDPnt")
fieldmap.addTable(VakFC)
arcpy.SpatialJoin_analysis(target_features="xxTALUDPnt",
                           join_features=VakFC,
                           out_feature_class="xxProfPnt",
                           join_operation="JOIN_ONE_TO_ONE",
                           join_type="KEEP_ALL",
Example No. 7
    inFile = checkgeoOutputDir + "/" + layer + ".dbf"
    count = int(arcpy.GetCount_management(inFile).getOutput(0))
    myWrite(logFile,
            "04 - Repairing geometry for " + str(count) + " polygons:  ")
    if count == 0:
        arcpy.Delete_management(inFile)
    if count > 0:
        inFile = tempOutputDir + "/YT.gdb/" + layer
        arcpy.RepairGeometry_management(inFile, "KEEP_NULL")
    myWrite(logFile, str(count) + " wrong geometry\n")

    ## Create AREA field and set its value
    inFile = tempOutputDir + "/YT.gdb/" + layer
    myWrite(logFile, "05 - Adding Field AREA to " + inFile + "...")
    arcpy.AddField_management(inFile, "GIS_AREA", "DOUBLE", "14", "4")
    arcpy.CalculateField_management(inFile, "GIS_AREA",
                                    "(!shape.area@hectares!)", "PYTHON")
    myWrite(logFile, " Done\n")

    ## Create PERIMETER field and set its value
    myWrite(logFile, "06 - Adding Field PERIMETER to " + inFile + "...")
    arcpy.AddField_management(inFile, "GIS_PERI", "DOUBLE", "10", "1")
    arcpy.CalculateField_management(inFile, "GIS_PERI",
                                    "(!shape.length@meters!)", "PYTHON")
    myWrite(logFile, " Done\n")

    myWrite(logFile, "07 - Adding Field HEADER to " + inFile + "...")
    arcpy.AddField_management(inFile, "HEADER_ID", "TEXT", 40)
    arcpy.CalculateField_management(inFile, "HEADER_ID", HeaderId, "PYTHON")
    myWrite(logFile, " Done\n")

    ## Create a CAS_ID field and derive its value
Example No. 8
def main(
        input_analysis_area,  # Required | Polygon feature class path
        input_surveys,  # Required | Polygon feature class path
        input_sites,  # Required | Polygon feature class path
        input_analysis_features,  # Required | Dict: {feature path: feature field, ..}
        input_analysis_rasters,  # Required | List: [raster path,..]
        output_folder,  # Required | Folder path
        input_surveys_select_field=None,  # Optional | Field name
        input_surveys_select_value=None,  # Optional | Field value
        input_sites_select_field=None,  # Optional | Field name
        input_sites_select_value=None,  # Optional | Field value
        n_non_site_sample_points=40000,  # Optional | integer
        n_site_sample_points=20000,  # Optional | integer
        min_site_sample_distance_m=15,  # Optional | integer (in meters)
):

    try:
        try:
            arcpy.CheckOutExtension('Spatial')
        except:
            raise LicenseError

        #--- Create Geodatabase ----------------------------------------------------------------------------------------------

        # Clear memory just in case
        deleteInMemory()

        # Date/time stamp for outputs
        dt_stamp = re.sub('[^0-9]', '', str(datetime.datetime.now())[:16])

        # Output fGDB name and full path
        file_name = os.path.splitext(os.path.basename(__file__))[0]
        gdb_name = "{}_{}".format(file_name, dt_stamp)
        gdb_path = os.path.join(output_folder, gdb_name + '.gdb')

        # Create a geodatabase
        arcpy.CreateFileGDB_management(output_folder, gdb_name, "10.0")

        # Set workspace to fGDB
        arcpy.env.workspace = gdb_path  # full path, not just the name

        # Get input analysis area spatial reference for outputs
        spatial_ref = arcpy.Describe(input_analysis_area).spatialReference

        # Create feature datasets
        input_features_dataset = os.path.join(gdb_path, 'Input_Features')
        arcpy.CreateFeatureDataset_management(gdb_path, 'Input_Features',
                                              spatial_ref)

        print "Random Forest Modeling {}".format(datetime.datetime.now())
        print "Running environment: Python - {}".format(sys.version)
        print "User: {}".format(user)
        print "Output Location: {}".format(gdb_path)

        #--- Get Analysis Area ------------------------------------------------------------------------------------------------

        # Copy analysis area and get acres
        analysis_area = os.path.join(gdb_path, 'Analysis_Area')
        arcpy.CopyFeatures_management(input_analysis_area, analysis_area)
        analysis_acres_field, analysis_acres_total = get_acres(analysis_area)
        print '\nTotal acres within analysis area: {}\n'.format(
            round(analysis_acres_total, 4))

        #--- Get Survey Data -------------------------------------------------------------------------------------------------

        # Get the input surveys FC as a feature layer clipped to analysis area
        arcpy.Clip_analysis(input_surveys, analysis_area,
                            "in_memory\\in_surveys")

        # Get the surveys subset if given a subselection and copy to fGDB
        if input_surveys_select_field:
            for field in arcpy.Describe("in_memory\\in_surveys").fields:
                if field.name == input_surveys_select_field:
                    if field.type in ("Integer", "SmallInteger"):
                        where = '"{}" = {}'.format(input_surveys_select_field,
                                                   input_surveys_select_value)
                    elif field.type == "String":
                        where = "{} = '{}'".format(input_surveys_select_field,
                                                   input_surveys_select_value)
                    break
            print "Surveys subselection: WHERE [{}] = '{}'".format(
                case_field, case_value)
            arcpy.MakeFeatureLayer_management("in_memory\\in_surveys",
                                              "in_memory\\surveys_raw", where)
        # If no sub-selection, keep everything
        else:
            print("No surveys subselection - using all records")
            arcpy.MakeFeatureLayer_management("in_memory\\in_surveys",
                                              "in_memory\\surveys_raw")

        # Dissolve and get survey acreage
        analysis_surveys = os.path.join(gdb_path, 'Analysis_Surveys')
        arcpy.Dissolve_management("in_memory\\surveys_raw", analysis_surveys)
        acres_field, survey_acres_total = get_acres(analysis_surveys)

        survey_coverage = survey_acres_total / analysis_acres_total
        print('Survey acres within analysis area: {}'.format(
            round(survey_acres_total, 2)))
        print('Survey proportion within analysis area: {}\n'.format(
            round(survey_coverage, 3)))

        # Enforce minimum survey coverage for analysis
        if survey_coverage < 0.05:
            raise InsufficientSurveyCoverage

        arcpy.Delete_management("in_memory\\in_surveys")
        arcpy.Delete_management("in_memory\\in_surveys_raw")

        #--- Get Site Data ---------------------------------------------------------------------------------------------------

        analysis_sites = os.path.join(gdb_path, 'Analysis_Sites')

        # Clip sites to survey coverage
        arcpy.Clip_analysis(input_sites, analysis_surveys,
                            "in_memory\\in_sites")

        # Get the sites subset if given a subselection and copy to fGDB
        if input_sites_select_field:
            for field in arcpy.Describe("in_memory\\in_sites").fields:
                if field.name == input_sites_select_field:
                    if field.type in ("Integer", "SmallInteger"):
                        where = '"{}" = {}'.format(input_sites_select_field,
                                                   input_sites_select_value)
                    elif field.type == "String":
                        where = "{} = '{}'".format(input_sites_select_field,
                                                   input_sites_select_value)
                    break
            print "Sites subselection: WHERE [{}] = '{}'".format(
                input_sites_select_field, input_sites_select_value)
            arcpy.MakeFeatureLayer_management("in_memory\\in_sites",
                                              "in_memory\\sites_raw", where)
        # If no sub-selection, keep everything
        else:
            print("No sites subselection - using all records")
            arcpy.MakeFeatureLayer_management("in_memory\\in_sites",
                                              "in_memory\\sites_raw")

        arcpy.CopyFeatures_management("in_memory\\sites_raw", analysis_sites)

        site_count = int(
            arcpy.GetCount_management(analysis_sites).getOutput(0))
        site_density = round(site_count / survey_acres_total, 4)
        acres_per_site = round(1 / site_density, 2)

        print 'Sites identified for analysis: {}'.format(site_count)
        print 'Site density in surveyed areas (sites/acre): {}'.format(
            site_density)
        print 'Approximately 1 site every {} acres\n'.format(acres_per_site)

        if site_count < 30:
            raise InsufficientSiteSample

        arcpy.Delete_management("in_memory\\sites")
        arcpy.Delete_management("in_memory\\sites_raw")

        #--- Create Sample Dataset -------------------------------------------------------------------------------------------

        non_site_points = os.path.join(gdb_path, 'Sample_Points_Non_Site')
        site_points = os.path.join(gdb_path, 'Sample_Points_Site')
        raw_point_dataset = os.path.join(gdb_path, 'Raw_Points_Dataset')
        final_point_dataset = os.path.join(gdb_path, 'Analysis_Points_Dataset')

        print('Creating point cloud..')

        desc = arcpy.Describe(analysis_area)
        xmin = desc.extent.XMin
        xmax = desc.extent.XMax
        ymin = desc.extent.YMin
        ymax = desc.extent.YMax

        # Create fishnet
        spacing = int(np.sqrt(n_non_site_sample_points))
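        # rows = columns = sqrt(N), so the fishnet's label points number
        # roughly n_non_site_sample_points.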
        arcpy.CreateFishnet_management(
            out_feature_class=non_site_points,
            origin_coord="{} {}".format(xmin, ymin),  # lower left
            y_axis_coord="{} {}".format(xmin,
                                        ymin + 10),  # lower left up ten meters
            number_rows=spacing,
            number_columns=spacing,
            corner_coord="{} {}".format(xmax, ymax),  # upper right
            labels="LABELS",
            template=analysis_area,
            geometry_type="POLYLINE")
        # CreateFishnet also wrote a companion '_label' point fc; drop the
        # polylines and point non_site_points at the label points instead.
        arcpy.Delete_management(non_site_points)  # This is the polyline
        non_site_points += '_label'  # Now it's the points

        arcpy.CreateRandomPoints_management(
            out_path=gdb_path,
            out_name=os.path.basename(site_points),
            constraining_feature_class=analysis_sites,
            number_of_points_or_field=n_site_sample_points,
            minimum_allowed_distance="{} Meters".format(
                min_site_sample_distance_m),
        )

        # Create the template for the final points fc
        arcpy.CreateFeatureclass_management(
            out_path=gdb_path,
            out_name=os.path.basename(raw_point_dataset),
            geometry_type='POINT',
            spatial_reference=non_site_points)

        # Add class ids to site and not site points
        for fc in [site_points, non_site_points, raw_point_dataset]:
            arcpy.AddField_management(fc, 'Class', "SHORT")

        # TODO: don't encode unsurveyed areas
        arcpy.CalculateField_management(non_site_points, "Class", 0,
                                        "PYTHON_9.3")
        arcpy.CalculateField_management(site_points, "Class", 1, "PYTHON_9.3")

        # select by location - analysis points within a site need to be coded 1
        non_site_points_lyr = arcpy.MakeFeatureLayer_management(
            non_site_points, 'in_memory\\non_site_points')
        arcpy.SelectLayerByLocation_management(non_site_points_lyr,
                                               "INTERSECT", analysis_sites)
        arcpy.CalculateField_management(non_site_points_lyr, "Class", 1,
                                        "PYTHON_9.3")  # These ones are sites

        # Append site_points and non_site_points to raw_point_dataset
        arcpy.Append_management([site_points, non_site_points],
                                raw_point_dataset, "NO_TEST")

        # Encode if final points are within a survey or not
        arcpy.AddField_management(raw_point_dataset, 'Surveyed', "SHORT")
        arcpy.CalculateField_management(raw_point_dataset, "Surveyed", 0,
                                        "PYTHON_9.3")
        raw_points_lyr = arcpy.MakeFeatureLayer_management(
            raw_point_dataset, 'in_memory\\raw_point_dataset')
        arcpy.SelectLayerByLocation_management(raw_points_lyr, "INTERSECT",
                                               analysis_surveys)
        arcpy.CalculateField_management(raw_points_lyr, "Surveyed", 1,
                                        "PYTHON_9.3")

        # Delete the intermediate sample point feature classes
        arcpy.Delete_management(non_site_points)
        arcpy.Delete_management(site_points)

        arcpy.Delete_management('in_memory\\non_site_points')
        arcpy.Delete_management('in_memory\\raw_point_dataset')

        #--- Data Attribution Loops ------------------------------------------------------------------------------------------

        # Feature Classes
        feature_class_inputs = list(input_analysis_features.keys())
        analysis_fields = list(
            input_analysis_features.values()) + ['Class', 'Surveyed']

        for input_fc_path, field in input_analysis_features.items():
            fc_name = os.path.basename(str(input_fc_path))

            print 'Getting data from feature class: {}.{}'.format(
                fc_name, field)

            out_path = os.path.join(input_features_dataset, fc_name)
            arcpy.Clip_analysis(input_fc_path, analysis_area, out_path)

        feature_class_point_data = os.path.join(input_features_dataset,
                                                'feature_class_data')
        arcpy.Intersect_analysis(
            in_features=feature_class_inputs + [raw_point_dataset],
            out_feature_class=final_point_dataset,
        )

        # clean up fields
        drop_fields = [
            f for f in arcpy.ListFields(final_point_dataset, "*")
            if f.name not in analysis_fields
        ]
        for drop_field in drop_fields:
            try:
                arcpy.DeleteField_management(final_point_dataset,
                                             drop_field.name)
            except:
                pass
                # print ('Did not delete field: [{}]'.format(drop_field.name))

        arcpy.Delete_management(raw_point_dataset)

        # Rasters
        raster_clips = []

        for raster in input_analysis_rasters:
            raster_name = os.path.basename(raster)
            out_raster = os.path.join(gdb_path, 'raster_' + raster_name)

            print 'Getting data from raster: {}'.format(raster_name)

            arcpy.Clip_management(
                in_raster=raster,
                rectangle="#",  # required arg; template extent is used instead
                out_raster=out_raster,
                in_template_dataset=analysis_area,
                clipping_geometry="ClippingGeometry",
            )
            # Sample the clipped raster, not the original full-extent one
            raster_clips.append([out_raster, raster_name])

            analysis_fields.append(raster_name)

        arcpy.sa.ExtractMultiValuesToPoints(final_point_dataset, raster_clips)

        #--- Prepare Models --------------------------------------------------------------------------------------------------

        df = feature_class_to_pandas_data_frame(final_point_dataset,
                                                analysis_fields,
                                                null_value=-99999)

        # One hot encode categorical variables
        df, encode_columns = auto_one_hot(df)
        df = df.drop(encode_columns, axis=1)

        # Isolate the unsurveyed areas as extrapolation set
        extrapolation_set = df[df.Surveyed == 0]
        extrapolation_set = extrapolation_set.drop(['Class', 'Surveyed'],
                                                   axis=1)

        # Split the Xs and Ys into train and test sets
        modeling_set = df[df.Surveyed == 1]
        modeling_set_Ys = modeling_set['Class']
        modeling_set_Xs = modeling_set.drop(['Class', 'Surveyed'], axis=1)
        trnX, tstX, trnY, tstY = train_test_split(modeling_set_Xs,
                                                  modeling_set_Ys,
                                                  test_size=0.3,
                                                  random_state=42)

        # Prep classifiers
        classifiers = {
            'K Nearest Neighbors': KNeighborsClassifier(),
            'Random Forest': RandomForestClassifier(),
            'Gradient Boosted Decision Trees': GradientBoostingClassifier(),
            'Logistic Regression': LogisticRegression(),
            'Neural Network Multi-layer Perceptron': MLPClassifier(),
            'Linear Support Vector Classifier': LinearSVC(),
            'Support Vector Machine': SVC(),
        }

        #--- Evaluate Model --------------------------------------------------------------------------------------------------

        print '\n', ' Model Results '.center(60, '-'), '\n'

        cv = 10
        performance = {}

        for name, model in classifiers.items():

            scores = cross_val_score(model, trnX, trnY, cv=cv)
            mean = scores.mean()
            stdev = scores.std()
            print "\n{}:\n\n\tAccuracy: {:.12f} (+/- {:.3f})".format(
                name, mean, stdev * 2)

            performance[name] = (mean, stdev)

            print '\n', '\t\t', 'Params'.center(45, '-')
            for k, v in sorted(model.get_params().items()):
                print '\t\t|', k.ljust(25, '.'), str(v)[:15].rjust(15,
                                                                   '.'), '|'
            print '\t\t', ''.center(45, '-'), '\n'

        ranked_models = sorted(performance.items(),
                               key=lambda x: x[1][0],
                               reverse=True)

        #--- Tune Best Model -----------------------------------------------------------------------------------------------

        best_classifier_name, (best_mean, best_std) = ranked_models[0]
        best_classifier = classifiers[best_classifier_name]

        print ' {} Gridsearch '.format(best_classifier_name).center(60, '#')
        print "Baseline Accuracy: {:.12f} (+/- {:.3f})".format(
            best_mean, best_std * 2)

        gscv = GridSearchCV(best_classifier,
                            parameters[best_classifier_name],
                            n_jobs=-1,
                            cv=cv,
                            error_score=0)
        gscv.fit(trnX, trnY)

        # TODO: Write to csv
        print pd.DataFrame(gscv.cv_results_)

        #--- Validate Tuned Model --------------------------------------------------------------------------------------------

        # Use the refit best estimator directly instead of eval()'ing its repr
        hypertuned_model = gscv.best_estimator_
        # Train on the whole data set less the final validation set
        hypertuned_model.fit(trnX, trnY)
        predicted_y = hypertuned_model.predict(tstX)

        print
        print " {} Out-of-Sample Testing ".format(best_classifier_name).center(
            60, '#')
        print
        print "Accuracy Score: {}".format(accuracy_score(tstY, predicted_y))
        print
        print "Confusion Matrix:"
        print "|TN|FP|"
        print "|FN|TP|"
        print confusion_matrix(tstY, predicted_y)
        print
        print "Class Report:"
        print classification_report(tstY, predicted_y)

        #--- Model Unsurveyed Areas ------------------------------------------------------------------------------------------
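
        # Sketch only (the source leaves this section empty): extrapolation_set
        # was built above with the same columns as the training Xs, so the
        # tuned model can score the unsurveyed points directly.
        extrapolation_predictions = hypertuned_model.predict(extrapolation_set)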

        #--- Output Results to ArcMap ----------------------------------------------------------------------------------------

        #--- House Keeping ---------------------------------------------------------------------------------------------------

        arcpy.CheckInExtension('Spatial')

    except arcpy.ExecuteError:
        print arcpy.GetMessages(2)
    except InsufficientSurveyCoverage:
        pass
    except InsufficientSiteSample:
        pass
    except LicenseError:
        pass
    except:
        pass
Example No. 9
def delineate_catchment(in_dem, in_sink, out_catchment):
    arcpy.CheckOutExtension("Spatial")
    workspace = os.path.split(out_catchment)[0]
    arcpy.env.workspace = workspace
    arcpy.env.overwriteOutput = True

    if not arcpy.Exists(in_dem):
        arcpy.AddMessage("The input raster does not exist")
        quit()

    if os.path.splitext(out_catchment)[1].lower() == ".shp":
        # FieldOID = "FID"
        FieldOID = "ID"
        FlowDirection = os.path.join(workspace, "FlowDirection.tif")
        SinkRaster = os.path.join(workspace, "SinkRaster.tif")
        Watershed = os.path.join(workspace, "Watershed.tif")
        Catchment_tmp = os.path.join(workspace, "Catchment_tmp.shp")
        Catchment_select = os.path.join(workspace, "Catchment_select.shp")
        # Catchment_dissolve = os.path.join(workspace, "Catchment.shp")
    else:
        FieldOID = "OBJECTID"
        FlowDirection = os.path.join(workspace, "FlowDirection")
        SinkRaster = os.path.join(workspace, "SinkRaster")
        Watershed = os.path.join(workspace, "Watershed")
        Catchment_tmp = os.path.join(workspace, "Catchment")
        # Catchment_select is used unconditionally below, so define it here too
        Catchment_select = os.path.join(workspace, "Catchment_select")

    input_dem = arcpy.Raster(in_dem)
    flow_direction = arcpy.sa.FlowDirection(input_dem)
    flow_direction.save(FlowDirection)

    cell_size = input_dem.meanCellWidth
    arcpy.env.extent = input_dem.extent
    arcpy.PolygonToRaster_conversion(in_sink, FieldOID, SinkRaster,
                                     "CELL_CENTER", "NONE", cell_size)

    watershed = arcpy.sa.Watershed(flow_direction, SinkRaster, "Value")
    watershed.save(Watershed)

    arcpy.RasterToPolygon_conversion(watershed, Catchment_tmp, "NO_SIMPLIFY",
                                     "Value")
    field = "GRIDCODE"
    sqlExp = field + ">" + str(0)
    arcpy.Select_analysis(Catchment_tmp, Catchment_select, sqlExp)
    arcpy.Dissolve_management(
        Catchment_select,
        out_catchment,
        dissolve_field="GRIDCODE",
        statistics_fields="",
        multi_part="MULTI_PART",
        unsplit_lines="DISSOLVE_LINES",
    )

    area_field = "cat_area"
    arcpy.AddField_management(out_catchment, area_field, "DOUBLE")
    arcpy.CalculateField_management(out_catchment, area_field,
                                    "!shape.area@squaremeters!", "PYTHON_9.3",
                                    "#")

    arcpy.JoinField_management(in_sink,
                               in_field="ID",
                               join_table=Watershed,
                               join_field="Value",
                               fields="Count")
    arcpy.AddField_management(in_sink,
                              field_name="cat_area",
                              field_type="FLOAT")
    arcpy.CalculateField_management(
        in_sink,
        field="cat_area",
        expression="!Count_1! * math.pow(" + str(cell_size) + ",2)",
        expression_type="PYTHON_9.3",
    )
    arcpy.DeleteField_management(in_sink, drop_field="Count_1")
    arcpy.AddField_management(in_sink,
                              field_name="dep2catR",
                              field_type="FLOAT")
    arcpy.CalculateField_management(
        in_sink,
        field="dep2catR",
        expression="!AREA! / !cat_area!",
        expression_type="PYTHON_9.3",
    )
    arcpy.Delete_management(Catchment_tmp)
    arcpy.Delete_management(Catchment_select)

    # #add output data to map
    # mxd = MapDocument("CURRENT")
    # df = ListDataFrames(mxd, "*")[0]
    # lyr_watershed = Layer(Watershed)
    # AddLayer(df, lyr_watershed)

    return out_catchment
Example No. 10
                                    arcpy.SelectLayerByAttribute_management(
                                        fc_lyr, "ADD_TO_SELECTION",
                                        """"OWNER1" = '""" + str(o) + """'""")
                                except:
                                    edit = owner.replace("\'", "\\\'")
                                    # elif "\&" in owner:
                                    edit = edit.replace("\&", "\\\&")
                                    print edit
                                    truncate = edit[:-6]
                                    print truncate
                                    arcpy.SelectLayerByAttribute_management(
                                        fc_lyr, "NEW_SELECTION",
                                        """"OWNER1" LIKE '""" + truncate +
                                        """%'""")
                    owner_id += 1
                    arcpy.CalculateField_management(fc_lyr, owner_id_field,
                                                    owner_id)
                    print "Owner ID is: " + str(owner_id)
                    arcpy.AddMessage("Owner ID is: " + str(owner_id))
                else:
                    print "Owner ID is: " + str(anId)
                    arcpy.AddMessage("Owner ID is: " + str(anId))
        except:
            print "Cannot query owner."
            arcpy.AddMessage("Cannot query owner.")
            pass
arcpy.SelectLayerByAttribute_management(fc_lyr, "NEW_SELECTION",
                                        """"Owner_ID" = 0""")
arcpy.MakeFeatureLayer_management(fc_lyr, "fc_lyr_temp")
fieldList = arcpy.ListFields("fc_lyr_temp")
print "\n Creating address list..."
arcpy.AddMessage("\n Creating address list...")
Example No. 11
srch = arcpy.SearchCursor(sum)

r = next(srch)

min_std = r.getValue('MIN_avg_std')
max_std = r.getValue('MAX_avg_std')
min_g = r.getValue('MIN_g_score')
max_g = r.getValue('MAX_g_score')

del r, srch

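# Min-max scale avg_std and g_score to [0, 1] so the two indicators are
# comparable; comp_01 below is their difference.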
# Process: Calculate Field
arcpy.CalculateField_management(
    inp, "avg_std_01",
    "round((!avg_std! - {min}) / ({max} - {min}), 2)".format(min=min_std,
                                                             max=max_std),
    "PYTHON_9.3")

# Process: Calculate Field (2)
arcpy.CalculateField_management(
    inp, "g_scr_01",
    "round((!g_score! - {min}) / ({max} - {min}), 2)".format(min=min_g,
                                                             max=max_g),
    "PYTHON_9.3")

# Process: Calculate Field (3)
arcpy.CalculateField_management(inp, "comp_01", "!avg_std_01! - !g_scr_01!",
                                "PYTHON_9.3", "")

arcpy.SetParameterAsText(1, inp)
Example No. 12
        # check to see if join already exists
        joinAdded = True
        fields = arcpy.ListFields(mupTable)
        for f in fields:
            if f.name.find('DescriptionOfMapUnits.Symbol') > -1:
                joinAdded = False
        # add the join only if it was not found above
        if joinAdded:
            arcpy.AddJoin_management(mupTable, 'MapUnit', dmu, 'MapUnit')

        # get field names for Symbol, Label
        mupSymbol = os.path.basename(mup) + '.Symbol'
        mupLabel = os.path.basename(mup) + '.Label'
        # calculate Symbol
        arcpy.CalculateField_management(mupTable, mupSymbol,
                                        '!DescriptionOfMapUnits.Symbol!',
                                        'PYTHON')
        # calculate Label
        arcpy.CalculateField_management(mupTable, mupLabel,
                                        '!DescriptionOfMapUnits.Label!',
                                        'PYTHON')
        # calculate Label for IdentityConfidence <> 'certain'
        if inFds.find('CorrelationOfMapUnits') == -1:
            selectField = arcpy.AddFieldDelimiters(os.path.dirname(inFds),
                                                   'IdentityConfidence')
            arcpy.SelectLayerByAttribute_management(
                mupTable, 'NEW_SELECTION', selectField + " <> 'certain'")
            arcpy.CalculateField_management(
                mupTable, 'MapUnitPolys.Label',
                '!DescriptionOfMapUnits.Label! + "?"', 'PYTHON')
Example No. 13
            arcpy.Clip_analysis(DesFile, boundary, out_fc)
        else:
            arcpy.CopyFeatures_management(DesFile, out_fc)

        #   Now we are working with the new files in the Union gdb
        arcpy.env.workspace = Union_gdb

        fields = []
        if NameField:
            MyFunctions.check_and_add_field(NewFile, new_name, "TEXT",
                                            name_len)
            # If the two names differ only in case, check_and_add_field will
            # already have copied the data across and deleted the old field,
            # so skip the calculate-field step
            if NameField.lower() != new_name.lower():
                arcpy.CalculateField_management(NewFile, new_name,
                                                "!" + NameField + "!",
                                                "PYTHON_9.3")
            fields.append(new_name)

        if desc_field:
            DescField = row.getValue("DescField")
            if DescField:
                print("  Desc field " + DescField)
                MyFunctions.check_and_add_field(NewFile, desc_name, "TEXT",
                                                desc_len)
                if DescField.lower() != desc_name.lower():
                    expression = "!" + DescField + "![:" + str(desc_len -
                                                               1) + "]"
                    arcpy.CalculateField_management(NewFile, desc_name,
                                                    expression, "PYTHON_9.3")
                fields.append(desc_name)
Example No. 14
def route_data_mile(route, park, block):
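    # Summarize park acreage (ACRES) and population (POP) along each route,
    # then write per-person acreage (ACRE_PP) and park access (PARK_PP)
    # scores back onto the census blocks.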
    new_tbl = str(block)[:-4] + "_" + str(route)[:-4]
    arcpy.CopyRows_management(route, new_tbl)
    route_tbl = str(new_tbl) + "_tvw"
    arcpy.MakeTableView_management(new_tbl, route_tbl)

    # Export table with name then do additional fields per year or whatever
    arcpy.AddField_management(route_tbl, "GEOID10", "TEXT", "", "", 15,
                              "GEOID10")
    arcpy.AddField_management(route_tbl, "SITE", "TEXT", "", "", 75, "SITE")
    arcpy.AddField_management(route_tbl, "ACRES", "DOUBLE", "", "", "",
                              "ACRES")
    arcpy.AddField_management(route_tbl, "POP", "LONG", "", "", "", "POP")
    arcpy.AddField_management(route_tbl, "ACRE_PP", "DOUBLE", "", "", "",
                              "ACRE_PP")
    arcpy.AddField_management(route_tbl, "PARK_PP", "DOUBLE", "", "", "",
                              "PARK_PP")

    expression1 = "(!Name![0:15])"
    expression2 = "(!Name![18:])"
    expression3 = "(!SITE![:-6])"
    arcpy.CalculateField_management(route_tbl, "GEOID10", expression1,
                                    "PYTHON_9.3")
    arcpy.CalculateField_management(route_tbl, "SITE", expression2,
                                    "PYTHON_9.3")
    arcpy.CalculateField_management(route_tbl, "SITE", expression3,
                                    "PYTHON_9.3")

    arcpy.AddJoin_management(route_tbl, "SITE", park, "NAME")
    field_name_1 = str(park)[:-4]
    expression4 = "(" + "!" + field_name_1 + ".MAP_ACRES!" + ")"
    arcpy.CalculateField_management(route_tbl, "ACRES", expression4,
                                    "PYTHON_9.3")
    arcpy.RemoveJoin_management(route_tbl)

    arcpy.AddJoin_management(route_tbl, "GEOID10", block, "GEOID10")
    field_name_2 = str(block)[:-4]
    expression5 = "(" + "!" + field_name_2 + ".POP!" + ")"
    arcpy.CalculateField_management(route_tbl, "POP", expression5,
                                    "PYTHON_9.3")
    arcpy.RemoveJoin_management(route_tbl)

    # Deletes rows where GEOID10 AND SITE are duplicates
    arcpy.DeleteIdentical_management(route_tbl, ["GEOID10", "SITE"])

    # summarize SITE by ACRES & POP
    site_tbl = str(route_tbl) + "_stats"
    arcpy.Statistics_analysis(route_tbl, site_tbl,
                              [["ACRES", "MEAN"], ["POP", "SUM"]], "SITE")

    # calculate acres/person & site/person for each park
    arcpy.AddField_management(site_tbl, "ACRE_PP", "DOUBLE", "", "", "",
                              "ACRE_PP")
    arcpy.AddField_management(site_tbl, "PARK_PP", "DOUBLE", "", "", "",
                              "PARK_PP")
    expression6 = "(!MEAN_ACRES!/!SUM_POP!)"
    expression7 = "(1/!SUM_POP!)"
    arcpy.CalculateField_management(site_tbl, "ACRE_PP", expression6,
                                    "PYTHON_9.3")
    arcpy.CalculateField_management(site_tbl, "PARK_PP", expression7,
                                    "PYTHON_9.3")

    arcpy.AddJoin_management(route_tbl, "SITE", site_tbl, "SITE")
    expression8 = "(!" + site_tbl + ".ACRE_PP!)"
    expression9 = "(!" + site_tbl + ".PARK_PP!)"
    arcpy.CalculateField_management(route_tbl, "ACRE_PP", expression8,
                                    "PYTHON_9.3")
    arcpy.CalculateField_management(route_tbl, "PARK_PP", expression9,
                                    "PYTHON_9.3")
    arcpy.RemoveJoin_management(route_tbl)

    # Summarize route layer by GEOID
    geoid_tbl = str(route_tbl) + "_geoidStats"
    arcpy.Statistics_analysis(route_tbl, geoid_tbl,
                              [["ACRE_PP", "SUM"], ["PARK_PP", "SUM"]],
                              "GEOID10")

    # join back to block and calculate fields
    arcpy.AddJoin_management(block, "GEOID10", geoid_tbl, "GEOID10")
    expression10 = "(!" + geoid_tbl + ".SUM_ACRE_PP!)"
    expression11 = "(!" + geoid_tbl + ".SUM_PARK_PP!)"
    arcpy.CalculateField_management(block, "ACRE_PP", expression10,
                                    "PYTHON_9.3")
    arcpy.CalculateField_management(block, "PARK_PP", expression11,
                                    "PYTHON_9.3")
    arcpy.RemoveJoin_management(block)

    with arcpy.da.UpdateCursor(block, ["ACRE_PP", "PARK_PP"]) as cursor:
        for row in cursor:
            if row[0] is None:
                row[0] = 0
            if row[1] is None:
                row[1] = 0
            cursor.updateRow(row)
    return
Example No. 15
arcpy.env.parallelProcessingFactor = "100%"
arcpy.env.workspace = ram
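# Build the output projection from its factory code; 102039 is USA Contiguous
# Albers Equal Area Conic (USGS version).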
albers = arcpy.SpatialReference()
albers.factoryCode = 102039
albers.create()
arcpy.env.outputCoordinateSystem = albers

# Make a fc of selected wetlands
nwifilter = """ "ATTRIBUTE" LIKE 'P%' """
arcpy.MakeFeatureLayer_management(nwi, "nwi_lyr")
arcpy.SelectLayerByAttribute_management("nwi_lyr", "NEW_SELECTION", nwifilter)
arcpy.CopyFeatures_management("nwi_lyr", "allwetpre")

# Add and calculate CSI ID number field
arcpy.AddField_management("allwetpre", "CSI_ID", "LONG")
arcpy.CalculateField_management("allwetpre", "CSI_ID", "!OBJECTID!", "PYTHON")

# Add field for hectares and calculate
arcpy.AddField_management("allwetpre", "WetHa", "DOUBLE")

# Calculate geometry for wetland hectares.
arcpy.CalculateField_management("allwetpre", "WetHa", "!shape.area@hectares!", "PYTHON")

# Buffer a donut around selected wetland polys 30m
arcpy.Buffer_analysis("allwetpre", "allwet", "30 meters", "OUTSIDE_ONLY")

# Add wetland order field for connected wetlands
arcpy.AddField_management("allwet","WetOrder", "TEXT")

# Spatial join connected wetlands and streams
##################Field Maps########################
Example No. 16
                    "", "", "NO_PRESERVE_SHAPE", "", "NO_VERTICAL")
                # Process: add the format fields
                print 'Create Format Fields for Feature ' + output
                print jens
                if jens == "1":
                    print "type one"
                    # objID
                    arcpy.AddField_management(output, "objID", "SHORT", "", "",
                                              "", "", "NULLABLE",
                                              "NON_REQUIRED", "")

                    # objType
                    arcpy.AddField_management(output, "objType", "TEXT", "",
                                              "", "", "", "NULLABLE",
                                              "NON_REQUIRED", "")
                    arcpy.CalculateField_management(output, "objType", types,
                                                    "VB", "")
                    # objYear
                    arcpy.AddField_management(output, "objYear", "SHORT", "",
                                              "", "", "", "NULLABLE",
                                              "NON_REQUIRED", "")
                    arcpy.CalculateField_management(output, "objYear", thn,
                                                    "VB", "")
                    # wapName
                    arcpy.AddField_management(output, "wapName", "TEXT", "",
                                              "", "", "", "NULLABLE",
                                              "NON_REQUIRED", "")
                    arcpy.CalculateField_management(output, "wapName", pro,
                                                    "VB", "")
                    # wakName
                    arcpy.AddField_management(output, "wakName", "TEXT", "",
                                              "", "", "", "NULLABLE",
Example No. 17
            arcpy.AlterField_management(fc, field.name, 'DD_X', 'DD_X')
        if field.name == 'LAT_NHD':
            arcpy.AlterField_management(fc, field.name, 'DD_Y', 'DD_Y')
        if field.name == 'GAGE_ID':
            arcpy.AlterField_management(fc, field.name, 'CONAGUA_ID',
                                        'CONAGUA_ID')
        if field.name == 'USGSID':
            arcpy.AlterField_management(fc, field.name, 'USGS_ID', 'USGS_ID')
        if field.name == 'NWISWEB':
            arcpy.AlterField_management(fc, field.name, 'HISTORICAL',
                                        'HISTORICAL')

    # Create historical link
    if fc.endswith("conagua"):
        expression1 = "!LINK!+str(!CONAGUA_ID!)+!MDB!"
        arcpy.CalculateField_management(fc, fieldHistorical, expression1,
                                        "PYTHON_9.3")
    if fc.endswith("ibwc"):
        for field in [fieldWebPre, fieldWebPost]:
            arcpy.AddField_management(fc, field, fieldTypeTx)
        cur = arcpy.UpdateCursor(fc)
        for row in cur:
            row.setValue(fieldWebPre, 'http://www.ibwc.gov/wad/')
            row.setValue(fieldWebPost, '.htm')
            cur.updateRow(row)
        expression2 = "!WEB!+!BINARY_ID!+!HTM!"
        arcpy.MakeFeatureLayer_management(fc, "temp")
        arcpy.SelectLayerByAttribute_management("temp", 'NEW_SELECTION',
                                                "\"BINARY_ID\" <> \'\'")
        arcpy.CalculateField_management("temp", fieldHistorical, expression2,
                                        "PYTHON_9.3")
Example No. 18
    arcpy.Union_analysis(infeatures, Union1, "ALL", "", "GAPS")

    #Delete overlapping habitat
    newField2 = arcpy.AddFieldDelimiters(arcpy.env.workspace, "FIRE_YEAR_1")
    SQLExpr3 = str(newField2) + " = " + str(FireYear)
    arcpy.MakeFeatureLayer_management(Union1, "Union2")
    #select row with overlapping habitat and delete row
    arcpy.SelectLayerByAttribute_management("Union2", "NEW_SELECTION",
                                            SQLExpr3)
    arcpy.DeleteRows_management("Union2")

    #create feature class from layer file
    arcpy.CopyFeatures_management("Union2", "Union3")
    #Append leftover habitat to target table
    TargetTable = Outwksp + "\\HabRecover40yr"
    arcpy.Append_management("Union3", TargetTable)
    arcpy.Delete_management("FireHist2")

#Calculate Area in hectares of leftover habitat for every year in TargetTable

arcpy.env.outputCoordinateSystem = arcpy.Describe(TargetTable).spatialReference
arcpy.AddGeometryAttributes_management(TargetTable, "AREA", "KILOMETERS",
                                       "HECTARES", "")

arcpy.AddField_management(TargetTable, "PercentRnge", "DOUBLE", "", "", "")

arcpy.CalculateField_management(TargetTable, "PercentRnge",
                                "[POLY_AREA] / 44165456194.3", "VB")

#print "script complete"
Example No. 19
import arcpy
from arcpy import env

# Set workspace
arcpy.env.workspace = workspace
arcpy.env.overwriteOutput = True

# Point Aggregation
arcpy.CopyFeatures_management(path, "areas_new")
arcpy.FeatureToPoint_management("areas_new", "aggr_points")
arcpy.AddXY_management("aggr_points")
arcpy.JoinField_management("areas_new","OBJECTID","aggr_points","ORIG_FID","#")
arcpy.DeleteField_management("areas_new","ORIG_FID;RORI_1")
arcpy.AddXY_management("o_points")
arcpy.AddField_management("o_points","origX","FLOAT","#","#","#","#","NULLABLE","NON_REQUIRED","#")
arcpy.AddField_management("o_points","origY","FLOAT","#","#","#","#","NULLABLE","NON_REQUIRED","#")
arcpy.CalculateField_management("o_points","origX","[POINT_X]","VB","#")
arcpy.CalculateField_management("o_points","origY","[POINT_Y]","VB","#")
arcpy.DeleteField_management("o_points","POINT_X;POINT_Y")
arcpy.SpatialJoin_analysis("o_points","areas_new","o_points1","JOIN_ONE_TO_ONE","KEEP_ALL","origX origX true true false 4 Float 0 0 ,First,#,o_points,origX,-1,-1;origY origY true true false 4 Float 0 0 ,First,#,o_points,origY,-1,-1;POINT_X POINT_X true true false 8 Double 0 0 ,First,#,areas_new,POINT_X,-1,-1;POINT_Y POINT_Y true true false 8 Double 0 0 ,First,#,areas_new,POINT_Y,-1,-1","INTERSECT","#","#")
arcpy.DeleteField_management("areas_new","Point_X;Point_Y")
arcpy.DeleteField_management("o_points1","Join_Count;TARGET_FID")
arcpy.AddField_management("o_points1","MaskedX","FLOAT","#","#","#","#","NULLABLE","NON_REQUIRED","#")
arcpy.AddField_management("o_points1","MaskedY","FLOAT","#","#","#","#","NULLABLE","NON_REQUIRED","#")
arcpy.CalculateField_management("o_points1","MaskedX","[POINT_X]","VB","#")
arcpy.CalculateField_management("o_points1","MaskedY","[POINT_Y]","VB","#")
arcpy.DeleteField_management("o_points1","POINT_X;POINT_Y")
arcpy.MakeXYEventLayer_management("o_points1","MaskedX","MaskedY","event_points")
arcpy.CopyFeatures_management("event_points","m_points1")
arcpy.CopyFeatures_management("m_points1","m_points")

# Delete unnecessary files
Example No. 20
    localWorkspace = 'E:\\LI_PLAN_REVIEW_FLAGS\\Workspace.gdb'

    inMemory = 'in_memory'

    arcpy.env.workspace = localWorkspace
    arcpy.env.overwriteOutput = True


    #Iterate through Council District Polygons
    currentTract = 'CurrentDistrict'
    tempParcels = 'TempParcels'
    tempZone = 'TempZone'
    districtCount = 0
    districtTotal = int(
        arcpy.GetCount_management(Council_Districts_Local).getOutput(0))
    arcpy.CalculateField_management(Council_Districts_Local, 'DISTRICT',
                                    '!DISTRICT!.strip(" ")', 'PYTHON_9.3')
    districtTileCursor = arcpy.da.SearchCursor(Council_Districts_Local, 'DISTRICT')
    parcelDict = {}
    for tract in districtTileCursor:
        memory = inMemory
        districtCount += 1
        print('Processing District ' + tract[0] + "\n" +
              str((float(districtCount) / float(districtTotal)) * 100.0) +
              '% Complete')
        arcpy.MakeFeatureLayer_management(
            localWorkspace + '\\' + Council_Districts_Local, currentTract,
            "DISTRICT = '" + tract[0] + "'")
        if arcpy.Exists(tempZone):
            arcpy.Delete_management(tempZone)
        arcpy.Clip_analysis(localWorkspace + '\\' + zoningFC, currentTract,
                            tempZone)
        if arcpy.Exists(tempParcels):
            arcpy.Delete_management(tempParcels)
        arcpy.Clip_analysis(localWorkspace + '\\' + PWD_Parcels_Working,
                            currentTract, tempParcels)
        IntersectOutput = localWorkspace + '\\' + zoningFC + '_Int'
        print('Running Intersect')
Example No. 21
    elif e_alias == 'OBJECTID':
        del e_alias
    else:
        arcpy.AddField_management(survey_result_table,
                                  e_name,
                                  e_type,
                                  field_alias=e_alias)
        print("Added {}".format(e_name))
        # TODO use concurrent cursors instead of this crap
        new_table = arcpy.ListFields(survey_result_table)
        # a = 0
        for n in new_table:
            # print("{}: {}".format(n.name, n.aliasName))
            if n.aliasName == e_alias:
                print(n.aliasName, e_alias)

                # a += 1
                # print(a)
                expression = '!{}!'.format(e_name)
                arcpy.CalculateField_management(survey_result_table,
                                                e_name,
                                                expression,
                                                'PYTHON3')
                print("{} Calculated".format(e_name))

new_fields = arcpy.ListFields(survey_result_table)
for n in new_fields:
    print(n.name)
for r in export_dict:
    d_name = r[1]
Example No. 22
    ncurrentstep += 1
    arcpy.AddMessage("Generating XY layer - Step " + str(ncurrentstep) + "/" +
                     str(nstep))
    SpatialRef = arcpy.Describe(Polygon).spatialReference
    ProxyPtsTEMP = arcpy.MakeXYEventLayer_management("ProxyTable", "NEAR_X",
                                                     "NEAR_Y", "ProxyPtsTEMP",
                                                     SpatialRef, "")

    ncurrentstep += 1
    arcpy.AddMessage("Final point shapefile with width information - Step " +
                     str(ncurrentstep) + "/" + str(nstep))
    WidthPts = arcpy.CopyFeatures_management(ProxyPtsTEMP, Output)
    arcpy.AddField_management(WidthPts, "Width", "DOUBLE", "", "", "", "",
                              "NULLABLE", "NON_REQUIRED", "")
    arcpy.CalculateField_management(WidthPts, "Width", "!NEAR_DIST!*2",
                                    "PYTHON_9.3", "")
    arcpy.DeleteField_management(WidthPts, "NEAR_DIST")

    # Delete residual fields
    try:
        arcpy.DeleteField_management(WidthPts, ["IN_FID"])
    except:
        pass

    #===============================================================================
    # DELETING TEMPORARY FILES
    #===============================================================================
    if str(DeleteTF) == "true":
        ncurrentstep += 1
        arcpy.AddMessage("Deleting temporary files - Step " +
                         str(ncurrentstep) + "/" + str(nstep))
Example No. 23
    def dist_matrix(fac, st_network, mode, direction, fishnet, num_to_find=5):
        arcpy.AddMessage("...generating distance matrix")

        # Initialize points_dict
        points_dict = {}
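        # Each value is [cell VALUE, a flag (unused in this snippet), and a
        # heap of (distance, facility ID) pairs filled in below].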
        with arcpy.da.SearchCursor(fishnet, ['OID@', 'VALUE']) as search_rows:
            for row in search_rows:
                points_dict[row[0]] = [row[1], False, []]

        # Initialize network analysis
        # Create a new closest facility analysis layer.
        arcpy.AddMessage(" ... initializing closest facility analysis")
        closest_fac_lyr_obj = arcpy.na.MakeClosestFacilityAnalysisLayer(
            st_network,
            "Closest_Facility",
            mode,
            direction,
            number_of_facilities_to_find=num_to_find).getOutput(0)
        # Sublayer names
        sublayer_names = arcpy.na.GetNAClassNames(closest_fac_lyr_obj)
        cf_fac_lyr_name = sublayer_names["Facilities"]
        cf_incidents_lyr_name = sublayer_names["Incidents"]
        cf_routes_lyr_name = sublayer_names["CFRoutes"]

        # Load facilities
        arcpy.na.AddLocations(closest_fac_lyr_obj, cf_fac_lyr_name, fac)
        # Load incidents
        arcpy.na.AddLocations(closest_fac_lyr_obj, cf_incidents_lyr_name,
                              fishnet)

        arcpy.na.Solve(closest_fac_lyr_obj)

        # Copy object ID to a new field for later spatial join
        arcpy.CalculateField_management(fishnet, "FishnetID", "!OBJECTID!",
                                        "PYTHON3")
        arcpy.CalculateField_management(fac, "FacID", "!OBJECTID!", "PYTHON3")

        # Spatial join to associate locations with their original IDs
        fac_join = cf_fac_lyr_name + "_join"
        inc_join = cf_incidents_lyr_name + "_join"
        arcpy.SpatialJoin_analysis(cf_fac_lyr_name,
                                   fac,
                                   fac_join,
                                   match_option="CLOSEST")
        arcpy.SpatialJoin_analysis(cf_incidents_lyr_name,
                                   fishnet,
                                   inc_join,
                                   match_option="CLOSEST")

        # Join facility and fishnet ID to routes.
        routes = cf_routes_lyr_name + "_copy"
        arcpy.CopyFeatures_management(cf_routes_lyr_name, routes)
        arcpy.JoinField_management(routes, "FacilityID", fac_join,
                                   "TARGET_FID", ["FacID"])
        arcpy.JoinField_management(routes, "IncidentID", inc_join,
                                   "TARGET_FID", ["FishnetID"])

        # Populate points_dict with distance.
        # Using heap to keep it sorted.
        with arcpy.da.SearchCursor(
                routes, ['FishnetID', 'FacID', 'Total_Length']) as search_rows:
            for row in search_rows:
                heapq.heappush(points_dict[int(row[0])][2], (row[2], row[1]))

        arcpy.Delete_management(closest_fac_lyr_obj)
        arcpy.Delete_management(
            os.path.join(arcpy.env.workspace, "ClosestFacility"))
        arcpy.Delete_management(routes)
        arcpy.Delete_management(fac_join)
        arcpy.Delete_management(inc_join)

        return points_dict
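    # Hedged usage sketch (not from the source): the dataset names, network,
    # travel mode and direction below are illustrative assumptions, and
    # dist_matrix() also relies on "import heapq" at module level.
    # points = dist_matrix("facilities", "streets_nd", "Driving Distance",
    #                      "TO_FACILITIES", "fishnet_pts", num_to_find=3)
    # dist, fac_id = points[1][2][0]  # heap root = closest facility for OID 1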
Exemplo n.º 24
0
import os
import sys

import arcpy

try:
    import helpers
except ImportError:
    sys.path.append(os.path.dirname(__file__))
    import helpers

input_fc = arcpy.GetParameterAsText(0)

arcpy.AddField_management(input_fc, 'SEGMENT_ID', 'LONG')

fields = helpers.set_curve_geom_number(input_fc, is_ground_truth=True)

arcpy.CalculateField_management(input_fc, 'SEGMENT_ID', '[uid]')

desc = arcpy.Describe(input_fc)

fields.extend([str(desc.OIDFieldName), str(desc.ShapeFieldName), 'SEGMENT_ID'])

for f in desc.fields:
    if str(f.name) not in fields:
        arcpy.DeleteField_management(input_fc, f.name)


arcpy.SetParameterAsText(1, input_fc)

Exemplo n.º 25
0
                      encoding='utf-8')


# Convert area of interest to polygon without simplifying
arcpy.AddMessage("Formatting absence cover data...")
arcpy.RasterToPolygon_conversion(area_of_interest, aoi_poly, "NO_SIMPLIFY",
                                 "VALUE")

# Erase survey sites in which the target taxon was present and clip the absence sites to the area of interest
arcpy.Erase_analysis(survey_sites, cover_feature, survey_erase, "")
arcpy.Clip_analysis(survey_erase, aoi_poly, absence_sites, "")

# Add a cover field to the absence sites with the cover value set to 0
arcpy.AddField_management(absence_sites, "cover", "DOUBLE", "", "", "", "",
                          "NULLABLE", "NON_REQUIRED", "")
arcpy.CalculateField_management(absence_sites, "cover", 0, "PYTHON", "")

# Add a project field to the absence sites with the value set to initialProject
arcpy.AddField_management(absence_sites, "project", "TEXT", "", "", "", "",
                          "NULLABLE", "NON_REQUIRED", "")
arcpy.CalculateField_management(absence_sites, "project", "!initialProject!",
                                "PYTHON", "")

# Clip the cover feature to the area of interest
arcpy.AddMessage("Formatting presence cover data...")
arcpy.Clip_analysis(cover_feature, aoi_poly, presence_sites, "")

# Delete unmatched fields from presence and absence datasets
arcpy.DeleteField_management(
    presence_sites,
    "abundanceID;vegObserver1;vegObserver2;nameAccepted;tsnITIS")
Exemplo n.º 26
0
raster_dict = {
    'nepac1': ['depth', 'depth_m'],
    'nepac1_slope': ['slope', 'slope_deg'],
    'nepac1_aspect': ['aspect', 'aspect_deg']
}
near_dict = {
    'Contour_nepac': ['isobath_dist_m', 'DeleteMe'],
    'cntry_06': ['shore_dist_m', 'shore_angle_deg']
}
# clear selections
arcpy.SelectLayerByAttribute_management(point_layer, 'CLEAR_SELECTION')

# add buffer field
arcpy.AddField_management(point_layer, "buffer50km", "Text", "", "", 16)
# set default value to "Outside"
arcpy.CalculateField_management(point_layer, "buffer50km", "'Outside'",
                                "PYTHON")
for poly in poly_list:
    # select points in polygon
    arcpy.SelectLayerByLocation_management(point_layer, "INTERSECT", poly)
    # find value to insert for selected points
    poly_name = poly_dict.get(poly)
    # insert value
    arcpy.CalculateField_management(point_layer, "buffer50km",
                                    "'{}'".format(poly_name), "PYTHON")
# clear selections
arcpy.SelectLayerByAttribute_management(point_layer, 'CLEAR_SELECTION')
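# Note: a string *value* passed to CalculateField must be quoted inside the
# expression, e.g. "'{}'".format(poly_name); an unquoted name would be parsed
# as a field reference (or raise an error) rather than used as a literal.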

# extract point values
in_name = point_layer
for raster in raster_list:
    # new output feature class
Exemplo n.º 27
0
    ### performing the zonal statistics on negative change
    #print "Calculating the total negative change by admin"
    arcpy.AddMessage("Calculating the total negative change by admin")
    nfile_change_negative = "zstat_" + str(zone_field) + "_LCchange_negative.dbf"
    outZSaTNegativo = ZonalStatisticsAsTable(admin_shapefile, zone_field,
                                             raster_LandChange_negative,
                                             nfile_change_negative,
                                             "DATA", "SUM")

    #calculating the percentage of negative change by admin
    nome_campo = "nch" + str(indice)
    arcpy.AddField_management(nfile_change_negative, nome_campo, "FLOAT",
                              "", "", "", "", "NULLABLE")

    codeblock = """def test(sum, count):
            if sum < 0:
                var = - (sum / count * 100)
            else:
                var = 0
            return var"""

    arcpy.CalculateField_management(nfile_change_negative, nome_campo,
                                    "test(!SUM!, !COUNT!)", "PYTHON_9.3",
                                    codeblock)
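    # Worked example: with SUM = -50.0 and COUNT = 200 the code block returns
    # -(-50.0 / 200 * 100) = 25.0, i.e. 25 % of the zone's pixels changed
    # negatively; non-negative sums return 0.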

    arcpy.JoinField_management(admin_shapefile, zone_field,
                               nfile_change_negative, zone_field, [nome_campo])

    arcpy.AddMessage("Processing succeeded. Outputs raster: " + workspace + ". Admin compiled: " + admin_shapefile)

    indice += 1
Exemplo n.º 28
0
def select_pour_points(nhd_gdb,
                       subregion_dem,
                       out_dir,
                       projection=arcpy.SpatialReference(102039)):
    # Preliminary environmental settings:
    env.snapRaster = subregion_dem
    env.extent = subregion_dem
    env.cellSize = 10
    env.pyramid = "PYRAMIDS -1 SKIP_FIRST"
    env.outputCoordinateSystem = projection
    arcpy.CheckOutExtension("Spatial")

    waterbody = os.path.join(nhd_gdb, 'NHDWaterbody')
    flowline = os.path.join(nhd_gdb, 'NHDFlowline')

    # Make a folder for the pour points
    huc4_code = re.search(r'\d{4}', os.path.basename(nhd_gdb)).group()
    pour_dir = os.path.join(out_dir, 'pourpoints{0}'.format(huc4_code))
    pour_gdb = os.path.join(pour_dir, 'pourpoints.gdb')
    if not os.path.exists(pour_dir):
        os.mkdir(pour_dir)
    if not arcpy.Exists(pour_gdb):
        arcpy.CreateFileGDB_management(pour_dir, 'pourpoints.gdb')

    env.workspace = pour_gdb

    # Make a layer from the NHDWaterbody feature class and select eligible
    # lakes: at least 0.04 sq. km (4 ha) for most FCodes, or at least
    # 0.1 sq. km for FCode 43601. Project to EPSG 102039.
    fcodes = (39000, 39004, 39009, 39010, 39011, 39012, 43600, 43613, 43615,
              43617, 43618, 43619, 43621)
    where_clause = '''("AreaSqKm" >=0.04 AND "FCode" IN %s) OR ("FCode" = 43601 AND "AreaSqKm" >= 0.1)''' % (
        fcodes, )

    arcpy.Select_analysis(waterbody, 'eligible_lakes', where_clause)

    # Make a shapefile from NHDFlowline and project to EPSG 102039
    arcpy.CopyFeatures_management(flowline, 'eligible_flowlines')

    # Add a POUR_ID field to the flowlines and waterbodies, then calculate
    # unique identifiers for all features: lake POUR_IDs are calculated first,
    # and the lake maximum is added to the stream POUR_IDs so the two ID sets
    # never collide (which makes it easy to tell them apart later).
    arcpy.AddField_management('eligible_lakes', "POUR_ID", "LONG")
    arcpy.CalculateField_management('eligible_lakes', "POUR_ID", '!OBJECTID!',
                                    "PYTHON")
    pour_ids = []
    with arcpy.da.SearchCursor('eligible_lakes', ['POUR_ID']) as cursor:
        for row in cursor:
            pour_ids.append(row[0])
    pour_id_offset = max(pour_ids)
    arcpy.AddField_management('eligible_flowlines', "POUR_ID", "LONG")
    arcpy.CalculateField_management('eligible_flowlines', "POUR_ID",
                                    '!OBJECTID! + {}'.format(pour_id_offset),
                                    "PYTHON")

    # these must be saved as tifs for the mosaic nodata values to work with the watersheds tool
    flowline_raster = os.path.join(pour_dir, "flowline_raster.tif")
    lakes_raster = os.path.join(pour_dir, "lakes_raster.tif")
    arcpy.PolylineToRaster_conversion('eligible_flowlines', "POUR_ID",
                                      flowline_raster, "", "", 10)
    arcpy.PolygonToRaster_conversion('eligible_lakes', "POUR_ID", lakes_raster,
                                     "", "", 10)

    # Mosaic the rasters together favoring waterbodies over flowlines.
    arcpy.MosaicToNewRaster_management([flowline_raster, lakes_raster],
                                       pour_dir, "pour_points.tif", projection,
                                       "32_BIT_UNSIGNED", "10", "1", "LAST",
                                       "LAST")
                                           "Overwrite")
# Pinyin field names: "dengji" = class/grade, "mj" = area, "fbl" = resolution
arcpy.AddField_management(OUTPUTPATH + OUTPUTNAME + ".tif", "dengji", "TEXT",
                          "", "", "")
arcpy.AddField_management(OUTPUTPATH + OUTPUTNAME + ".tif", "mj", "DOUBLE", "",
                          "", "")
arcpy.AddField_management(OUTPUTPATH + OUTPUTNAME + ".tif", "fbl", "DOUBLE",
                          "", "", "")
expression = "getClass(!VALUE!)"
codeblock = """def getClass(a):
    if a == 1:
        return u"极敏感"    # "extremely sensitive"
    if a == 3:
        return u"高度敏感"  # "highly sensitive"
    else:
        return u'一般敏感'  # "generally sensitive"
"""
arcpy.CalculateField_management(OUTPUTPATH + OUTPUTNAME + ".tif", "fbl", FBLL,
                                "PYTHON_9.3")
arcpy.CalculateField_management(OUTPUTPATH + OUTPUTNAME + ".tif", "dengji",
                                expression, "PYTHON_9.3", codeblock)
arcpy.CalculateField_management(OUTPUTPATH + OUTPUTNAME + ".tif", "mj",
                                "(!fbl!)*(!COUNT!)", "PYTHON_9.3")
arcpy.DeleteField_management(OUTPUTPATH + OUTPUTNAME + ".tif", "fbl")
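# Note: mj ("area") is the per-cell value FBLL times the class pixel COUNT;
# the helper field fbl only carries FBLL into the expression and is deleted
# once mj has been calculated.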
arcpy.Delete_management("haqsshp.shp")
arcpy.Delete_management("smhshp.shp")
arcpy.Delete_management("stlsshp.shp")
arcpy.Delete_management("tdshshp.shp")
arcpy.Delete_management("stmgshp.shp")
arcpy.Delete_management("stmg.tif")
if arcpy.Exists("tihuan1.tif"):
    arcpy.Delete_management("tihuan1.tif")
if arcpy.Exists("tihuan.tif"):
    arcpy.Delete_management("tihuan.tif")
Exemplo n.º 30
0
def main(gis_ws, input_soil_ws, cdl_year, zone_type='huc8',
         overwrite_flag=False, cleanup_flag=False):
    """Calculate zonal statistics needed to run ET-Demands model

    Args:
        gis_ws (str): Folder/workspace path of the GIS data for the project
        input_soil_ws (str): Folder/workspace path of the common soils data
        cdl_year (int): Cropland Data Layer year
        zone_type (str): Zone type (huc8, huc10, county)
        overwrite_flag (bool): If True, overwrite existing files
        cleanup_flag (bool): If True, remove temporary files

    Returns:
        None

    """
    logging.info('\nCalculating ET-Demands Zonal Stats')

    # DEADBEEF - Hard code for now
    if zone_type == 'huc10':
        zone_path = os.path.join(gis_ws, 'huc10', 'wbdhu10_albers.shp')
        zone_id_field = 'HUC10'
        zone_name_field = 'HUC10'
        zone_name_str = 'HUC10 '
    elif zone_type == 'huc8':
        zone_path = os.path.join(gis_ws, 'huc8', 'wbdhu8_albers.shp')
        zone_id_field = 'HUC8'
        zone_name_field = 'HUC8'
        zone_name_str = 'HUC8 '
    elif zone_type == 'county':
        zone_path = os.path.join(
            gis_ws, 'counties', 'county_nrcs_a_mbr_albers.shp')
        zone_id_field = 'COUNTYNAME'
        zone_name_field = 'COUNTYNAME'
        zone_name_str = ''
    elif zone_type == 'gridmet':
        zone_path = os.path.join(
            gis_ws, 'gridmet', 'gridmet_4km_cells_albers.shp')
        zone_id_field = 'GRIDMET_ID'
        zone_name_field = 'GRIDMET_ID'
        zone_name_str = 'GRIDMET_ID '
    # elif zone_type == 'nldas':
    #     zone_path = os.path.join(
    #         gis_ws, 'counties', 'county_nrcs_a_mbr_albers.shp')
    #     zone_id_field = 'NLDAS_ID'
    #     zone_name_field = 'NLDAS_ID'
    #     zone_name_str = 'NLDAS_4km_'

    et_cells_path = os.path.join(gis_ws, 'ETCells.shp')
    # if gdb_flag:
    #     gdb_path = os.path.join(
    #         os.path.dirname(gis_ws), 'et-demands_py\et_demands.gdb')
    #     et_cells_path = os.path.join(gdb_path, 'et_cells')
    # else:
    #     et_cells_path = os.path.join(gis_ws, 'ETCells.shp')

    cdl_ws = os.path.join(gis_ws, 'cdl')
    soil_ws = os.path.join(gis_ws, 'soils')
    zone_ws = os.path.dirname(zone_path)

    agland_path = os.path.join(
        cdl_ws, 'agland_{}_30m_cdls.img'.format(cdl_year))
    agmask_path = os.path.join(
        cdl_ws, 'agmask_{}_30m_cdls.img'.format(cdl_year))
    table_fmt = 'zone_{}.dbf'

    # ET cell field names
    cell_lat_field = 'LAT'
    cell_lon_field = 'LON'
    cell_id_field = 'CELL_ID'
    cell_name_field = 'CELL_NAME'
    station_id_field = 'STATION_ID'
    awc_field = 'AWC'
    clay_field = 'CLAY'
    sand_field = 'SAND'
    awc_in_ft_field = 'AWC_IN_FT'
    hydgrp_num_field = 'HYDGRP_NUM'
    hydgrp_field = 'HYDGRP'

    # active_flag_field = 'ACTIVE_FLAG'
    # irrig_flag_field = 'IRRIGATION_FLAG'
    # permeability_field = 'PERMEABILITY'
    # soil_depth_field = 'SOIL_DEPTH'
    # aridity_field = 'ARIDITY'
    # dairy_cutting_field = 'DAIRY_CUTTINGS'
    # beef_cutting_field = 'BEEF_CUTTINGS'

    # active_flag_default = 1
    # irrig_flag_default = 1
    # permeability_default = -999
    # soil_depth_default = 60         # inches
    # aridity_default = 50
    # dairy_cutting_default = 3
    # beef_cutting_default = 2

    # Output names/paths
    zone_proj_name = 'zone_proj.shp'
    zone_raster_name = 'zone_raster.img'
    table_ws = os.path.join(gis_ws, 'zone_tables')

    #
    snap_raster = os.path.join(cdl_ws, '{}_30m_cdls.img'.format(cdl_year))
    # snap_cs = 30
    sqm_2_acres = 0.000247105381        # From google

    # Link ET demands crop number (1-84) with CDL values (1-255)
    # Key is CDL number, value is crop number, comment is CDL class name
    # Crosswalk values come from cdl_crosswalk.csv and are unpacked into a dictionary
    # Allows user to modify crosswalk in excel
    # Pass in crosswalk file as an input argument
    crosswalk_file = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'cdl_crosswalk_usbrmod.csv')
    cross = pd.read_csv(crosswalk_file)

    # Add Try and Except for header names, unique crop numbers, etc.
    crop_num_dict = dict()
    for index, row in cross.iterrows():
        crop_num_dict[int(row.cdl_no)] = list(
            map(int, str(row.etd_no).split(',')))
    logging.debug(crop_num_dict)
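    # Illustrative crosswalk rows (assumed CSV layout, mirroring the commented
    # dictionary kept below for reference):
    #   cdl_no,etd_no
    #   1,7          -> CDL Corn maps to ETD crop 7 (Field Corn)
    #   26,"13,85"   -> a double crop maps to two ETD crop numbers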

    # REMOVE LATER AFTER TESTING ABOVE
    # Link ET demands crop number (1-84) with CDL values (1-255)
    # Key is CDL number, value is crop number, comment is CDL class name
    # crop_num_dict = dict()
    # crop_num_dict[1] = [7]     # Corn -> Field Corn
    # crop_num_dict[2] = [58]    # Cotton -> Cotton
    # crop_num_dict[3] = [65]    # Rice -> Rice
    # crop_num_dict[4] = [60]    # Sorghum -> Sorghum
    # crop_num_dict[5] = [66]    # Soybeans -> Soybeans
    # crop_num_dict[6] = [36]    # Sunflower -> Sunflower -irrigated
    # crop_num_dict[10] = [67]   ## Peanuts -> Peanuts
    # crop_num_dict[11] = [36]   ## Tobacco -> Sunflower -irrigated
    # crop_num_dict[12] = [9]    # Sweet Corn -> Sweet Corn Early Plant
    # crop_num_dict[13] = [7]     # Pop or Orn Corn -> Field Corn
    # crop_num_dict[14] = [33]    # Mint -> Mint
    # crop_num_dict[21] = [11]    # Barley -> Spring Grain - irrigated
    # crop_num_dict[22] = [11]    # Durum Wheat -> Spring Grain - irrigated
    # crop_num_dict[23] = [11]    # Spring Wheat -> Spring Grain - irrigated
    # crop_num_dict[24] = [13]    # Winter Wheat -> Winter Grain - irrigated
    # crop_num_dict[25] = [11]    # Other Small Grains -> Spring Grain - irrigated
    # crop_num_dict[26] = [13, 85]    # Dbl Crop WinWht/Soybeans -> Soybeans After Another Crop
    # crop_num_dict[27] = [11]    # Rye -> Spring Grain - irrigated
    # crop_num_dict[28] = [11]    # Oats -> Spring Grain - irrigated
    # crop_num_dict[29] = [68]    # Millet -> Millet
    # crop_num_dict[30] = [11]    # Speltz -> Spring Grain - irrigated
    # crop_num_dict[31] = [40]    # Canola -> Canola
    # crop_num_dict[32] = [11]    # Flaxseed -> Spring Grain - irrigated
    # crop_num_dict[33] = [38]    # Safflower -> Safflower -irrigated
    # crop_num_dict[34] = [41]    # Rape Seed -> Mustard
    # crop_num_dict[35] = [41]    # Mustard -> Mustard
    # crop_num_dict[36] = [3]     # Alfalfa -> Alfalfa - Beef Style
    # crop_num_dict[37] = [4]     # Other Hay/Non Alfalfa -> Grass Hay
    # crop_num_dict[38] = [41]    # Camelina -> Mustard
    # crop_num_dict[39] = [41]    # Buckwheat -> Mustard
    # crop_num_dict[41] = [31]    # Sugarbeets -> Sugar beets
    # crop_num_dict[42] = [5]     # Dry Beans -> Snap and Dry Beans - fresh
    # crop_num_dict[43] = [30]    # Potatoes -> Potatoes
    # crop_num_dict[44] = [11]    # Other Crops -> Spring Grain - irrigated
    # crop_num_dict[45] = [76]    # Sugarcane -> Sugarcane
    # crop_num_dict[46] = [30]    # Sweet Potatoes -> Potatoes
    # crop_num_dict[47] = [21]    # Misc Vegs & Fruits -> Garden Vegetables  - general
    # crop_num_dict[48] = [24]    # Watermelons -> Melons
    # crop_num_dict[49] = [23]    # Onions -> Onions
    # crop_num_dict[50] = [21]    # Cucumbers -> Garden Vegetables  - general
    # crop_num_dict[51] = [5]     # Chick Peas -> Snap and Dry Beans - fresh
    # crop_num_dict[52] = [5]     # Lentils -> Snap and Dry Beans - fresh
    # crop_num_dict[53] = [27]    # Peas -> Peas--fresh
    # crop_num_dict[54] = [69]    # Tomatoes -> Tomatoes
    # crop_num_dict[55] = [75]    # Caneberries -> Cranberries
    # crop_num_dict[56] = [32]    # Hops -> Hops
    # crop_num_dict[57] = [21]    # Herbs -> Garden Vegetables  - general
    # crop_num_dict[58] = [41]    # Clover/Wildflowers -> Mustard
    # crop_num_dict[59] = [17]    # Sod/Grass Seed -> Grass - Turf (lawns) -irrigated
    # crop_num_dict[60] = [81]    # Switchgrass -> Sudan
    # crop_num_dict[66] = [19]    # Cherries -> Orchards - Apples and Cherries w/ground cover
    # crop_num_dict[67] = [19]    # Peaches -> Orchards - Apples and Cherries w/ground cover
    # crop_num_dict[68] = [19]    # Apples -> Orchards - Apples and Cherries w/ground cover
    # crop_num_dict[69] = [25]    # Grapes -> Grapes
    # crop_num_dict[70] = [82]    # Christmas Trees -> Christmas Trees
    # crop_num_dict[71] = [19]    # Other Tree Crops -> Orchards - Apples and Cherries w/ground cover
    # crop_num_dict[72] = [70]    # Citrus -> Oranges
    # crop_num_dict[74] = [74]    # Pecans -> Nuts
    # crop_num_dict[75] = [74]    # Almonds -> Nuts
    # crop_num_dict[76] = [74]    # Walnuts -> Nuts
    # crop_num_dict[77] = [19]    # Pears -> Orchards - Apples and Cherries w/ground cover
    # crop_num_dict[176] = [15]    # Grassland/Pasture -> Grass Pasture - high management
    # crop_num_dict[204] = [74]    # Pistachios -> Nuts
    # crop_num_dict[205] = [11]    # Triticale -> Spring Grain - irrigated
    # crop_num_dict[206] = [22]    # Carrots -> Carrots
    # crop_num_dict[207] = [21]    # Asparagus -> Asparagus
    # crop_num_dict[208] = [43]    # Garlic -> Garlic
    # crop_num_dict[209] = [24]    # Cantaloupes -> Melons
    # crop_num_dict[210] = [19]    # Prunes -> Orchards - Apples and Cherries w/ground cover
    # crop_num_dict[211] = [61]    # Olives -> Olives
    # crop_num_dict[212] = [70]    # Oranges -> Oranges
    # crop_num_dict[213] = [24]    # Honeydew Melons -> Melons
    # crop_num_dict[214] = [21]    # Broccoli -> Garden Vegetables  - general
    # crop_num_dict[216] = [59]    # Peppers -> Peppers
    # crop_num_dict[217] = [19]    # Pomegranates -> Orchards - Apples and Cherries w/ground cover
    # crop_num_dict[218] = [19]    # Nectarines -> Orchards - Apples and Cherries w/ground cover
    # crop_num_dict[219] = [21]    # Greens -> Garden Vegetables  - general
    # crop_num_dict[220] = [19]    # Plums -> Orchards - Apples and Cherries w/ground cover
    # crop_num_dict[221] = [62]    # Strawberries -> Strawberries
    # crop_num_dict[222] = [21]    # Squash -> Garden Vegetables  - general
    # crop_num_dict[223] = [19]    # Apricots -> Orchards - Apples and Cherries w/ground cover
    # crop_num_dict[224] = [6]     # Vetch -> Snap and Dry Beans - seed
    # crop_num_dict[225] = [77]    # Dbl Crop WinWht/Corn -> Field Corn After Another Crop
    # crop_num_dict[226] = [77]    # Dbl Crop Oats/Corn -> Field Corn After Another Crop
    # crop_num_dict[227] = [71]    # Lettuce -> Lettuce (Single Crop)
    # crop_num_dict[229] = [21]    # Pumpkins -> Garden Vegetables  - general
    # crop_num_dict[230] = [71, 84]    # Dbl Crop Lettuce/Durum Wht -> Grain After Another Crop
    # crop_num_dict[231] = [71, 83]    # Dbl Crop Lettuce/Cantaloupe -> Melons After Another Crop
    # crop_num_dict[232] = [71, 79]    # Dbl Crop Lettuce/Cotton -> Cotton After Another Crop
    # crop_num_dict[233] = [71, 84]    # Dbl Crop Lettuce/Barley -> Grain After Another Crop
    # crop_num_dict[234] = [71, 78]    # Dbl Crop Durum Wht/Sorghum -> Sorghum After Another Crop
    # crop_num_dict[235] = [71, 78]    # Dbl Crop Barley/Sorghum -> Sorghum After Another Crop
    # crop_num_dict[236] = [13, 78]    # Dbl Crop WinWht/Sorghum -> Sorghum After Another Crop
    # crop_num_dict[237] = [11, 77]    # Dbl Crop Barley/Corn -> Field Corn After Another Crop
    # crop_num_dict[238] = [13, 79]    # Dbl Crop WinWht/Cotton -> Cotton After Another Crop
    # crop_num_dict[239] = [66, 79]    # Dbl Crop Soybeans/Cotton -> Cotton After Another Crop
    # crop_num_dict[240] = [66, 84]    # Dbl Crop Soybeans/Oats -> Grain After Another Crop
    # crop_num_dict[241] = [7, 85]    # Dbl Crop Corn/Soybeans -> Soybeans After Another Crop
    # crop_num_dict[242] = [63]    # Blueberries -> Blueberries
    # crop_num_dict[243] = [80]    # Cabbage -> Cabbage
    # crop_num_dict[244] = [21]    # Cauliflower -> Garden Vegetables  - general
    # crop_num_dict[245] = [21]    # Celery -> Garden Vegetables  - general
    # crop_num_dict[246] = [21]    # Radishes -> Garden Vegetables  - general
    # crop_num_dict[247] = [21]    # Turnips -> Garden Vegetables  - general
    # crop_num_dict[248] = [21]    # Eggplants -> Garden Vegetables  - general
    # crop_num_dict[249] = [21]    # Gourds -> Garden Vegetables  - general
    # crop_num_dict[250] = [75]    # Cranberries -> Cranberries
    # crop_num_dict[254] = [11, 85]    # Dbl Crop Barley/Soybeans -> Soybeans After Another Crop
    # crop_num_dict[99] = [20]    #Empty CDL Placeholder for Orchards without Cover
    # crop_num_dict[98] = [86]    #Empty CDL Placeholder for AgriMet based "Grass Pasture- Mid Management"
    # crop_num_dict[97] = [16]    #Empty CDL Placeholder for "Grass Pasture- Low Management"

    # Check input folders
    if not os.path.isdir(gis_ws):
        logging.error('\nERROR: The GIS workspace does not exist'
                      '\n  {}'.format(gis_ws))
        sys.exit()
    elif not os.path.isdir(cdl_ws):
        logging.error('\nERROR: The CDL workspace does not exist'
                      '\n  {}'.format(cdl_ws))
        sys.exit()
    elif not os.path.isdir(soil_ws):
        logging.error('\nERROR: The soil workspace does not exist'
                      '\n  {}'.format(soil_ws))
        sys.exit()
    elif input_soil_ws != soil_ws and not os.path.isdir(input_soil_ws):
        logging.error('\nERROR: The input soil folder does not exist'
                      '\n  {}'.format(input_soil_ws))
        sys.exit()
    elif not os.path.isdir(zone_ws):
        logging.error('\nERROR: The zone workspace does not exist'
                      '\n  {}'.format(zone_ws))
        sys.exit()
    logging.info('\nGIS Workspace:   {}'.format(gis_ws))
    logging.info('CDL Workspace:   {}'.format(cdl_ws))
    logging.info('Soil Workspace:  {}'.format(soil_ws))
    if input_soil_ws != soil_ws:
        logging.info('Soil Workspace:  {}'.format(input_soil_ws))
    logging.info('Zone Workspace:  {}'.format(zone_ws))

    # Check input files
    if not os.path.isfile(snap_raster):
        logging.error('\nERROR: The snap raster does not exist'
                      '\n  {}'.format(snap_raster))
        sys.exit()
    elif not os.path.isfile(agland_path):
        logging.error('\nERROR: The agland raster does not exist'
                      '\n  {}'.format(agland_path))
        sys.exit()
    elif not os.path.isfile(agmask_path):
        logging.error('\nERROR: The agmask raster does not exist'
                      '\n  {}'.format(agmask_path))
        sys.exit()
    elif not os.path.isfile(zone_path):
        logging.error('\nERROR: The zone shapefile does not exist'
                      '\n  {}'.format(zone_path))
        sys.exit()

    arcpy.CheckOutExtension('Spatial')
    arcpy.env.pyramid = 'NONE 0'
    arcpy.env.overwriteOutput = overwrite_flag
    arcpy.env.parallelProcessingFactor = 8

    # Build output table folder if necessary
    if not os.path.isdir(table_ws):
        os.makedirs(table_ws)
    # if gdb_flag and not os.path.isdir(os.path.dirname(gdb_path)):
    #     os.makedirs(os.path.dirname(gdb_path))

    # Remove existing data if overwrite
    # if overwrite_flag and arcpy.Exists(et_cells_path):
    #     arcpy.Delete_management(et_cells_path)
    # if overwrite_flag and gdb_flag and arcpy.Exists(gdb_path):
    #     shutil.rmtree(gdb_path)

    # # Build output geodatabase if necessary
    # if gdb_flag and not arcpy.Exists(gdb_path):
    #     arcpy.CreateFileGDB_management(
    #         os.path.dirname(gdb_path), os.path.basename(gdb_path))

    raster_list = [
        [awc_field, 'MEAN', os.path.join(input_soil_ws, 'AWC_30m_albers.img')],
        [clay_field, 'MEAN', os.path.join(input_soil_ws, 'CLAY_30m_albers.img')],
        [sand_field, 'MEAN', os.path.join(input_soil_ws, 'SAND_30m_albers.img')],
        ['AG_COUNT', 'SUM', agmask_path],
        ['AG_ACRES', 'SUM', agmask_path],
        ['AG_' + awc_field, 'MEAN', os.path.join(
            soil_ws, 'AWC_{}_30m_cdls.img'.format(cdl_year))],
        ['AG_' + clay_field, 'MEAN', os.path.join(
            soil_ws, 'CLAY_{}_30m_cdls.img'.format(cdl_year))],
        ['AG_' + sand_field, 'MEAN', os.path.join(
            soil_ws, 'SAND_{}_30m_cdls.img'.format(cdl_year))]
    ]

    # The zone field must be defined
    if len(arcpy.ListFields(zone_path, zone_id_field)) == 0:
        logging.error('\nERROR: The zone ID field {} does not exist\n'.format(
            zone_id_field))
        sys.exit()
    elif len(arcpy.ListFields(zone_path, zone_name_field)) == 0:
        logging.error(
            '\nERROR: The zone name field {} does not exist\n'.format(
                zone_name_field))
        sys.exit()

    # The built-in ArcPy zonal stats function fails if count >= 65536
    zone_count = int(arcpy.GetCount_management(zone_path).getOutput(0))
    logging.info('\nZone count: {}'.format(zone_count))
    if zone_count >= 65536:
        logging.error(
            '\nERROR: Zonal stats cannot be calculated since there '
            'are more than 65536 unique features\n  {}'.format(zone_path))
        sys.exit()

    # Copy the zone_path
    if overwrite_flag and arcpy.Exists(et_cells_path):
        arcpy.Delete_management(et_cells_path)
    # Just copy the input shapefile
    if not arcpy.Exists(et_cells_path):
        arcpy.Copy_management(zone_path, et_cells_path)

    # Join the stations to the zones and read in the matches
    # if not arcpy.Exists(et_cells_path):
    #     zone_field_list = [f.name for f in arcpy.ListFields(zone_path)]
    #     zone_field_list.append(station_id_field)
    #     zone_field_list.append('OBJECTID_1')
    #     arcpy.SpatialJoin_analysis(zone_path, station_path, et_cells_path)
    #     # arcpy.SpatialJoin_analysis(station_path, zone_path, et_cells_path)
    #     delete_field_list = [f.name for f in arcpy.ListFields(et_cells_path)
    #                          if f.name not in zone_field_list]
    #     logging.info('Deleting Fields')
    #     if field_name in delete_field_list:
    #         logging.debug('  {}'.format(field_name))
    #         try: arcpy.DeleteField_management(et_cells_path, field_name)
    #         except: pass


    # Get spatial reference
    output_sr = arcpy.Describe(et_cells_path).spatialReference
    snap_sr = arcpy.Raster(snap_raster).spatialReference
    snap_cs = arcpy.Raster(snap_raster).meanCellHeight
    logging.debug('  Zone SR: {}'.format(output_sr.name))
    logging.debug('  Snap SR: {}'.format(snap_sr.name))
    logging.debug('  Snap Cellsize: {}'.format(snap_cs))

    # Add lat/lon fields
    logging.info('Adding Fields')
    field_list = [f.name for f in arcpy.ListFields(et_cells_path)]
    if cell_lat_field not in field_list:
        logging.debug('  {}'.format(cell_lat_field))
        arcpy.AddField_management(et_cells_path, cell_lat_field, 'DOUBLE')
        lat_lon_flag = True
    if cell_lon_field not in field_list:
        logging.debug('  {}'.format(cell_lon_field))
        arcpy.AddField_management(et_cells_path, cell_lon_field, 'DOUBLE')
        lat_lon_flag = True
    # Cell/station ID
    if cell_id_field not in field_list:
        logging.debug('  {}'.format(cell_id_field))
        arcpy.AddField_management(et_cells_path, cell_id_field, 'TEXT',
                                  '', '', 24)
    if cell_name_field not in field_list:
        logging.debug('  {}'.format(cell_name_field))
        arcpy.AddField_management(et_cells_path, cell_name_field, 'TEXT',
                                  '', '', 48)
    if station_id_field not in field_list:
        logging.debug('  {}'.format(station_id_field))
        arcpy.AddField_management(et_cells_path, station_id_field, 'TEXT',
                                  '', '', 24)
    if zone_id_field not in field_list:
        logging.debug('  {}'.format(zone_id_field))
        arcpy.AddField_management(et_cells_path, zone_id_field, 'TEXT',
                                  '', '', 8)

    # Status flags
    # if active_flag_field not in field_list:
    #     logging.debug('  {}'.format(active_flag_field))
    #     arcpy.AddField_management(et_cells_path, active_flag_field, 'SHORT')
    # if irrig_flag_field not in field_list:
    #     logging.debug('  {}'.format(irrig_flag_field))
    #     arcpy.AddField_management(et_cells_path, irrig_flag_field, 'SHORT')
    # Add zonal stats fields
    for field_name, stat, raster_path in raster_list:
        if field_name not in field_list:
            logging.debug('  {}'.format(field_name))
            arcpy.AddField_management(et_cells_path, field_name, 'FLOAT')

    # Other soil fields
    if awc_in_ft_field not in field_list:
        logging.debug('  {}'.format(awc_in_ft_field))
        arcpy.AddField_management(et_cells_path, awc_in_ft_field, 'FLOAT', 
                                  8, 4)
    if hydgrp_num_field not in field_list:
        logging.debug('  {}'.format(hydgrp_num_field))
        arcpy.AddField_management(et_cells_path, hydgrp_num_field, 'SHORT')
    if hydgrp_field not in field_list:
        logging.debug('  {}'.format(hydgrp_field))
        arcpy.AddField_management(et_cells_path, hydgrp_field, 'TEXT',
                                  '', '', 1)
    # if permeability_field not in field_list:
    #     logging.debug('  {}'.format(permeability_field))
    #     arcpy.AddField_management(et_cells_path, permeability_field, 'FLOAT')
    # if soil_depth_field not in field_list:
    #     logging.debug('  {}'.format(soil_depth_field))
    #     arcpy.AddField_management(et_cells_path, soil_depth_field, 'FLOAT')
    # if aridity_field not in field_list:
    #     logging.debug('  {}'.format(aridity_field))
    #     arcpy.AddField_management(et_cells_path, aridity_field, 'FLOAT')

    # Cuttings
    # if dairy_cutting_field not in field_list:
    #     logging.debug('  {}'.format(dairy_cutting_field))
    #     arcpy.AddField_management(et_cells_path, dairy_cutting_field, 'SHORT')
    # if beef_cutting_field not in field_list:
    #     logging.debug('  {}'.format(beef_cutting_field))
    #     arcpy.AddField_management(et_cells_path, beef_cutting_field, 'SHORT')

    # Crop fields are only added for needed crops (after zonal histogram)
    # for crop_num in crop_num_list:
    #     field_name = 'CROP_{0:02d}'.format(crop_num)
    #     if field_name not in field_list:
    #         logging.debug('  {}'.format(field_name))
    #         arcpy.AddField_management(et_cells_path, field_name, 'LONG')

    # Calculate lat/lon
    logging.info('Calculating lat/lon')
    cell_lat_lon_func(et_cells_path, cell_lat_field, cell_lon_field,
                      output_sr.GCS)

    # Set CELL_ID and CELL_NAME
    # (zone_id_field must be a string)
    arcpy.CalculateField_management(
        et_cells_path, cell_id_field,
        'str(!{}!)'.format(zone_id_field), 'PYTHON')
    arcpy.CalculateField_management(
        et_cells_path, cell_name_field,
        '"{}" + str(!{}!)'.format(zone_name_str, zone_name_field), 'PYTHON')

    # Remove existing (could use overwrite instead)
    zone_proj_path = os.path.join(table_ws, zone_proj_name)
    zone_raster_path = os.path.join(table_ws, zone_raster_name)
    if overwrite_flag and arcpy.Exists(zone_proj_path):
        arcpy.Delete_management(zone_proj_path)
    if overwrite_flag and arcpy.Exists(zone_raster_path):
        arcpy.Delete_management(zone_raster_path)

    # Project zones to match CDL/snap coordinate system
    logging.info('Projecting zones')
    if arcpy.Exists(et_cells_path) and not arcpy.Exists(zone_proj_path):
        arcpy.Project_management(et_cells_path, zone_proj_path, snap_sr)

    # Convert the zones polygon to raster
    logging.info('Converting zones to raster')
    if arcpy.Exists(zone_proj_path) and not arcpy.Exists(zone_raster_path):
        arcpy.env.snapRaster = snap_raster
        # arcpy.env.extent = arcpy.Describe(snap_raster).extent
        arcpy.FeatureToRaster_conversion(
            zone_proj_path, cell_id_field, zone_raster_path, snap_cs)
        arcpy.ClearEnvironment('snapRaster')
        # arcpy.ClearEnvironment('extent')

    # Link zone raster Value to zone field
    # (zone_id_field must be a string)
    fields = ('Value', cell_id_field)
    zone_value_dict = {
        r[0]: r[1] for r in arcpy.da.SearchCursor(zone_raster_path, fields)}

    # Calculate zonal stats
    logging.info('\nProcessing soil rasters')
    for field_name, stat, raster_path in raster_list:
        logging.info('{} {}'.format(field_name, stat))
        table_path = os.path.join(
            table_ws, table_fmt.format(field_name.lower()))
        if overwrite_flag and os.path.isfile(table_path):
            arcpy.Delete_management(table_path)
        if not os.path.isfile(table_path) and os.path.isfile(zone_raster_path):
            table_obj = arcpy.sa.ZonalStatisticsAsTable(
                zone_raster_path, 'VALUE', raster_path,
                table_path, 'DATA', stat)
            del table_obj

        # Read in zonal stats values from table
        # Value is the raster Value in zone_raster_path (not the zone ID);
        # zone_value_dict maps it back to the CELL_ID
        zs_dict = {
            zone_value_dict[row[0]]: row[1]
            for row in arcpy.da.SearchCursor(table_path, ('Value', stat))}
        # zs_dict = dict()
        # fields = ('Value', stat)
        # with arcpy.da.SearchCursor(table_path, fields) as s_cursor:
        #     for row in s_cursor:
        #         zs_dict[row[0]] = row[1]

        # Write zonal stats values to zone polygon shapefile
        fields = (cell_id_field, field_name)
        with arcpy.da.UpdateCursor(et_cells_path, fields) as u_cursor:
            for row in u_cursor:
                row[1] = zs_dict.pop(row[0], 0)
                u_cursor.updateRow(row)
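        # Note: zs_dict.pop(row[0], 0) above writes 0 for any cell without a
        # zonal-stats record (e.g. zones with no valid pixels) instead of
        # raising a KeyError.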

    # Calculate agricultural area in acres
    logging.info('\nCalculating agricultural acreage')
    arcpy.CalculateField_management(
        et_cells_path, 'AG_ACRES',
        '!AG_COUNT! * {0} * {1} * {1}'.format(sqm_2_acres, snap_cs), 'PYTHON')
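    # e.g. with a 30 m snap cell, each ag pixel contributes
    # 0.000247105381 * 30 * 30 ~= 0.2224 acres to AG_ACRES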

    # Calculate AWC in in/feet
    logging.info('Calculating AWC in in/ft')
    arcpy.CalculateField_management(
        et_cells_path, awc_in_ft_field,
        '!{}! * 12'.format(awc_field), 'PYTHON')

    # Calculate hydrologic group
    logging.info('Calculating hydrologic group')
    fields = (clay_field, sand_field, hydgrp_num_field, hydgrp_field)
    with arcpy.da.UpdateCursor(et_cells_path, fields) as u_cursor:
        for row in u_cursor:
            if row[1] > 50:
                row[2], row[3] = 1, 'A'
            elif row[0] > 40:
                row[2], row[3] = 3, 'C'
            else:
                row[2], row[3] = 2, 'B'
            u_cursor.updateRow(row)
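    # i.e. SAND > 50 % -> group A (1); otherwise CLAY > 40 % -> group C (3);
    # all remaining cells -> group B (2)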

    # # Calculate default values
    # logging.info('\nCalculating default values')
    # logging.info('  {:10s}: {}'.format(
    #     active_flag_field, active_flag_default))
    # arcpy.CalculateField_management(
    #     et_cells_path, active_flag_field, active_flag_default, 'PYTHON')
    # logging.info('  {:10s}: {}'.format(irrig_flag_field, irrig_flag_default))
    # arcpy.CalculateField_management(
    #     et_cells_path, irrig_flag_field, irrig_flag_default, 'PYTHON')
    #
    # logging.info('  {:10s}: {}'.format(
    #     permeability_field, permeability_default))
    # arcpy.CalculateField_management(
    #     et_cells_path, permeability_field, permeability_default, 'PYTHON')
    # logging.info('  {:10s}: {}'.format(soil_depth_field, soil_depth_default))
    # arcpy.CalculateField_management(
    #     et_cells_path, soil_depth_field, soil_depth_default, 'PYTHON')
    # logging.info('  {:10s}: {}'.format(aridity_field, aridity_default))
    # arcpy.CalculateField_management(
    #     et_cells_path, aridity_field, aridity_default, 'PYTHON')
    #
    # logging.info('  {:10s}: {}'.format(
    #     dairy_cutting_field, dairy_cutting_default))
    # arcpy.CalculateField_management(
    #     et_cells_path, dairy_cutting_field, dairy_cutting_default, 'PYTHON')
    # logging.info('  {:10s}: {}'.format(
    #     beef_cutting_field, beef_cutting_default))
    # arcpy.CalculateField_management(
    #     et_cells_path, beef_cutting_field, beef_cutting_default, 'PYTHON')

    # Calculate crop zonal stats

    # Copied from above for testing
    #zone_raster_path = os.path.join(table_ws, zone_raster_name)

    logging.info('\nCalculating crop zonal stats')
    temp_table_ws = os.path.join(table_ws, 'crop_tables')

    # #Create folder if it doesn't exist
    if not os.path.isdir(temp_table_ws):
        os.makedirs(temp_table_ws)

    # Loop through zones in groups (ZonalHistogram takes a max of 255 zones):
    # break the call into 250-zone steps and check that all zones exist in
    # the output table
    temp_list = []
    step_size = 250
    zone_raster_obj = arcpy.Raster(zone_raster_path)
    for i in range(1, zone_count+1, step_size):
    # for i in range(3251, zone_count + 1, step_size):
        start = time.time()
        logging.info('Zones: {} to {}'.format(i, i+step_size-1))
        # Create temporary path for each zone in memory
        temp_table_path = os.path.join(
            temp_table_ws, 'crop_table_{}_{}.dbf'.format(i, i + step_size - 1))
        # Add temporary paths to list for merge
        temp_list.append(temp_table_path)
        # Run ZonalHistogram on a group of zones
        single_zone = arcpy.sa.Con((zone_raster_obj >= i) & (zone_raster_obj < (i + step_size)), zone_raster_obj)
        table_obj = arcpy.sa.ZonalHistogram(
            single_zone, 'VALUE', agland_path, temp_table_path)

        # Look for missing zones in output table
        field_name_list = [f.name for f in arcpy.ListFields(temp_table_path)]
        value_list = [f.split('_')[-1] for f in field_name_list]
        value_list.remove('OID')
        value_list = list(map(int, value_list))

        # if not set((range(i, i+step_size))) & set(value_list):
        if len(set(range(i, np.clip(i+step_size, 1, zone_count+1))).difference(set(value_list))) > 0:
            print('Output Table Missing Zones (Check Input Station and CDL Files for ag area):')
            print(sorted(set(range(i, np.clip(i+step_size, 1, zone_count+1))).difference(set(value_list))))
            # sys.exit()
        del table_obj
        print("ZonalHistogram Runtime: {}".format(time.clock() - start))
    del zone_raster_obj

    # Read in zonal stats values from table
    logging.info('Reading crop zonal stats')
    zone_crop_dict = defaultdict(dict)
    for temp_table_path in temp_list:
        logging.info(temp_table_path)
        field_name_list = [f.name for f in arcpy.ListFields(temp_table_path)]
        value_list = [f.split('_')[-1] for f in field_name_list]
        logging.debug('  Crop histogram field list:\n    {}'.format(
            ', '.join(field_name_list)))
        with arcpy.da.SearchCursor(temp_table_path, '*') as s_cursor:
            for i, row in enumerate(s_cursor):
                # Row id is 1 based, but FID/CDL is 0 based? THIS IS WRONG FOR DBFs? vs INFO
                # cdl_number set to i after modifying ZonalHistogram to write to DBF not memory
                # cdl_number = int(row[0] - 1)
                cdl_number = i
                # Only 'crops' have a crop number (no shrub, water, urban, etc.)
                if cdl_number not in crop_num_dict.keys():
                    logging.debug('  Skipping CDL {}'.format(cdl_number))
                    continue
                # Crop number can be an integer or list of integers (double crops)
                crop_number = crop_num_dict[cdl_number]
                # Crop numbers of -1 are for crops that haven't been linked
                #   to a CDL number
                if not crop_number or crop_number == -1:
                    logging.warning('  Missing CDL {}'.format(cdl_number))
                    continue
                # Get values
                for j, cell in enumerate(row):
                    if j > 0 and cell != 0:
                        # Save acreage twice for double crops
                        for c in crop_number:
                            zone_str = zone_value_dict[int(value_list[j])]
                            zone_crop_dict[zone_str][c] = cell
        # if cleanup_flag and arcpy.Exists(temp_table_path):
        #     arcpy.Delete_management(temp_table_path)

    # Get unique crop number values and field names
    crop_number_list = sorted(list(set([
        crop_num for crop_dict in zone_crop_dict.values()
        for crop_num in crop_dict.keys()])))
    logging.debug('Crop number list: ' + ', '.join(map(str, crop_number_list)))
    crop_field_list = sorted([
        'CROP_{:02d}'.format(crop_num) for crop_num in crop_number_list])
    logging.debug('Crop field list: ' + ', '.join(crop_field_list))

    # Add fields for CDL values
    logging.info('Writing crop zonal stats')
    for field_name in crop_field_list:
        if field_name not in field_list:
            logging.debug('  {}'.format(field_name))
            arcpy.AddField_management(et_cells_path, field_name, 'FLOAT')

    # Write zonal stats values to zone polygon shapefile
    # DEADBEEF - This is intentionally writing every cell
    #   0's are written for cells with nodata
    fields = crop_field_list + [cell_id_field]
    with arcpy.da.UpdateCursor(et_cells_path, fields) as u_cursor:
        for row in u_cursor:
            crop_dict = zone_crop_dict.pop(row[-1], dict())
            for crop_i, crop_number in enumerate(crop_number_list):
                # Convert pixel counts to acreage
                crop_pixels = crop_dict.pop(crop_number, 0)
                row[crop_i] = crop_pixels * sqm_2_acres * snap_cs ** 2
            u_cursor.updateRow(row)

    if cleanup_flag and arcpy.Exists(zone_proj_path):
        arcpy.Delete_management(zone_proj_path)
    if cleanup_flag and arcpy.Exists(zone_raster_path):
        arcpy.Delete_management(zone_raster_path)