def calculateDensity(fcpolygon, attribute, geodatabase="assignment2.gdb"):
    """Compute per-square-mile density of *attribute* for each polygon.

    Adds an ``area_sq_miles`` field (geodesic area for geographic data,
    planar area otherwise) and a ``density_sq_miles`` field equal to
    attribute / area. Both fields are written back onto *fcpolygon*.

    Parameters
    ----------
    fcpolygon : str
        Name/path of a polygon feature class inside *geodatabase*.
    attribute : str
        Name of the numeric field to normalize by area.
    geodatabase : str
        Workspace containing *fcpolygon* (default "assignment2.gdb").
    """
    arcpy.env.workspace = geodatabase

    # Quick exception handling for file type
    describePoly = arcpy.Describe(fcpolygon)
    describeWork = arcpy.Describe(geodatabase)
    field_list = [field.name for field in arcpy.ListFields(fcpolygon)]

    if describePoly.shapetype != "Polygon" or describeWork.dataType != "Workspace":
        print("You did not give me the correct file types.")
    elif attribute not in field_list:
        print("{0} is not a field in {1}".format(attribute, fcpolygon))
    else:
        # create new field for our area calculation
        a_field = "area_sq_miles"
        arcpy.management.AddField(fcpolygon, a_field, "DOUBLE")

        # BUG FIX: the original tested == "Projected" but printed a warning
        # about a *geographic* projection. Planar AREA is only reliable for
        # projected data; geographic data must use a geodesic calculation.
        if describePoly.spatialReference.type == "Geographic":
            print("{0} has a geographic coordinate system - using geodesic "
                  "area for accurate results".format(fcpolygon))
            arcpy.CalculateGeometryAttributes_management(
                fcpolygon, [[a_field, "AREA_GEODESIC"]], "MILES_US",
                "SQUARE_MILES_US")
        else:
            # Projected SR: planar AREA is accurate.
            arcpy.CalculateGeometryAttributes_management(
                fcpolygon, [[a_field, "AREA"]], "MILES_US",
                "SQUARE_MILES_US")

        # create new field for our density calculation
        d_field = "density_sq_miles"
        arcpy.management.AddField(fcpolygon, d_field, "DOUBLE")

        # calculate the density of the count variable
        # according to the size of the polygon.
        expression = "!" + attribute + "!/!" + a_field + "!"
        arcpy.CalculateField_management(fcpolygon, d_field, expression,
                                        "PYTHON3")
def calculateDensity(fcpolygon, attribute, geodatabase="assignment2.gdb"):
    """Add an area field and an attribute-per-square-mile Density field.

    Parameters
    ----------
    fcpolygon : str
        Polygon feature class inside *geodatabase*.
    attribute : str
        Numeric field to normalize by area.
    geodatabase : str
        Workspace containing the feature class.
    """
    import arcpy
    arcpy.env.overwriteOutput = True

    # BUG FIX: workspace was set to the literal string "geodatabase"
    # instead of the function parameter.
    arcpy.env.workspace = geodatabase

    # BUG FIX: Describe() was called on the undefined name `states`.
    desc = arcpy.Describe(fcpolygon).spatialReference

    # BUG FIX: `desc.Name == "WGS84" or "NAD83"` was always truthy, so the
    # else branch could never run. Use a proper membership test.
    if desc.name in ("WGS84", "NAD83"):
        # geographic coordinate systems give poor planar-area results
        print("Reproject polygon to coordinate system that can more "
              "accurately calculate area.")
    else:
        # add a field to populate with the area of the polygon
        arcpy.management.AddField(fcpolygon, "area_sqmi", "DOUBLE")
        # BUG FIX: the area unit belongs in the area_unit argument, not the
        # length_unit slot; AREA_GEODESIC needs a square-mile unit keyword.
        arcpy.CalculateGeometryAttributes_management(
            fcpolygon, [["area_sqmi", "AREA_GEODESIC"]],
            area_unit="SQUARE_MILES_US")
        # add a field to populate with the density of the attribute
        arcpy.management.AddField(fcpolygon, "Density", "DOUBLE")
        # BUG FIX: the expression was Python division of a string and "SQL"
        # is not a valid expression language; build a field-calculator
        # expression and evaluate it with PYTHON3.
        expression = "!{0}! / !area_sqmi!".format(attribute)
        arcpy.management.CalculateField(fcpolygon, "Density", expression,
                                        "PYTHON3")
def estimateTotalLineLengthInPolygons(fcLine, fcClipPolygon,
                                      polygonIDFieldName, clipPolygonID,
                                      geodatabase="assignment2.gdb"):
    """Total geodesic length (miles) of *fcLine* features inside one polygon.

    Selects the polygon whose *polygonIDFieldName* value equals
    *clipPolygonID*, clips the line feature class to it, and sums the
    geodesic length of the clipped lines.

    Parameters
    ----------
    fcLine : str
        Polyline feature class (e.g. a river network).
    fcClipPolygon : str
        Polygon feature class used as the clip boundary.
    polygonIDFieldName : str
        Field in *fcClipPolygon* that uniquely identifies a polygon.
    clipPolygonID : str
        Value of that field for the polygon of interest.
    geodatabase : str
        Workspace containing both feature classes.

    Returns
    -------
    float or None
        Total length in miles, or None if a geoprocessing error occurred.
    """
    # Test for existence of the workspace before using it.
    if arcpy.Exists(geodatabase):
        arcpy.env.workspace = geodatabase
        print("Environment workspace is set to: ", geodatabase)
    else:
        print("Workspace", geodatabase, "does not exist!")
        sys.exit(1)

    # create output file with the same name as existing file by overwriting it
    arcpy.env.overwriteOutput = True

    # BUG FIX: the original `try` had no `except` (a SyntaxError), reassigned
    # the parameters to undefined names (river_network.shp, Iowa, ...), and
    # called Select/Clip with swapped or undefined arguments. The logic below
    # implements the stated objective with the same signature.
    try:
        # use the describe function to validate the element data types
        desc_fcLine = arcpy.Describe(fcLine)
        desc_fcClipPolygon = arcpy.Describe(fcClipPolygon)
        if desc_fcLine.shapeType != "Polyline":
            print("Error shapeType: ", fcLine, "needs to be a polyline type!")
            sys.exit(1)
        if desc_fcClipPolygon.shapeType != "Polygon":
            print("Error name: ", fcClipPolygon, "needs to be a polygon type!")
            sys.exit(1)
        # both inputs must share one spatial reference
        if desc_fcLine.spatialReference.name != desc_fcClipPolygon.spatialReference.name:
            print("Coordinate system error: Spatial reference of", fcLine,
                  "and", fcClipPolygon, "should be the same.")
            sys.exit(1)

        # Select the single polygon identified by clipPolygonID.
        selected_polygon = "selected_polygon"
        where_clause = "{0} = '{1}'".format(
            arcpy.AddFieldDelimiters(fcClipPolygon, polygonIDFieldName),
            clipPolygonID)
        arcpy.Select_analysis(fcClipPolygon, selected_polygon, where_clause)

        # Clip the linear features by the selected polygon boundary.
        clipped_lines = "clipped_lines"
        arcpy.Clip_analysis(fcLine, selected_polygon, clipped_lines)

        # Calculate geodesic length in miles for every clipped segment.
        arcpy.AddField_management(clipped_lines, "length_mi", "DOUBLE")
        arcpy.CalculateGeometryAttributes_management(
            clipped_lines, [["length_mi", "LENGTH_GEODESIC"]], "MILES_US")

        # Sum the segment lengths.
        total_length = 0.0
        with arcpy.da.SearchCursor(clipped_lines, ["length_mi"]) as cursor:
            for row in cursor:
                total_length += row[0]
        print("Total line length in polygon", clipPolygonID, ":",
              total_length, "miles")
        return total_length
    except arcpy.ExecuteError:
        # surface the geoprocessing error messages instead of crashing
        print(arcpy.GetMessages(2))
def intersect():
    """Intersect Total_Coverage with the MRS boundary and compute acreage.

    Overwrites the Oahu_Total_Coverage feature class in Oahu.gdb, then adds
    an ACRES field populated with planar area in acres.

    Returns:
        None; results are written into the geodatabase.
    """
    arcpy.env.workspace = r'C:\Users\JBurton_AOR\Documents\ArcGIS\Projects\Oahu\Oahu.gdb'
    # BUG FIX: env.overwriteOutput expects a boolean; the string 'True'
    # only worked by accident of truthiness.
    arcpy.env.overwriteOutput = True

    # output location for the intersect result
    outGDB = r'C:\Users\JBurton_AOR\Documents\ArcGIS\Projects\Oahu\Oahu.gdb\Oahu_Total_Coverage'

    # Remove the previous run's output before re-creating it.
    arcpy.Delete_management(outGDB)
    print('Oahu Total Coverage deleted')

    # Intersect the MRS boundary with the total coverage layer.
    inFeatures = ['MRS_WGS84_UTM4N', 'Total_Coverage']
    arcpy.Intersect_analysis(inFeatures, outGDB)
    print('intersect complete')

    # add ACRES field
    field = 'ACRES'
    arcpy.AddField_management(outGDB, field, 'FLOAT')
    print('added fields')

    # Calculate geometry - acres
    arcpy.CalculateGeometryAttributes_management(outGDB, [[field, 'AREA']],
                                                 area_unit='Acres')
    print('calculated acreage')
    # BUG FIX: the original printed "Total Coverage was copied to
    # Oahu_Dashboard.gdb" although the CopyFeatures call was commented out;
    # the misleading message is removed along with the dead code.
def addFields():
    """Add FILE and ACRES fields to every feature class in the current
    workspace and populate them (source name and planar acreage).

    Returns:
        None; the feature classes are modified in place.
    """
    feature_classes = arcpy.ListFeatureClasses()
    print(feature_classes)

    # Field that will receive the acreage value.
    area_field = 'ACRES'

    for feature_class in feature_classes:
        print('processing' + " " + feature_class)

        # One text field for the source name, one float field for acreage.
        arcpy.AddField_management(feature_class, 'FILE', 'TEXT')
        arcpy.AddField_management(feature_class, area_field, 'FLOAT')
        print('added fields')

        # Planar area in acres derived from the polygon geometry.
        arcpy.CalculateGeometryAttributes_management(
            feature_class, [[area_field, 'AREA']], area_unit='Acres')

        # Stamp every row with the quoted feature-class name.
        name_literal = '"' + str(feature_class) + '"'
        arcpy.CalculateField_management(feature_class, 'FILE', name_literal,
                                        "PYTHON")
        print('calculated fields')
def raster2shp(raster_name, out_shp_name=str(), simplify="NO_SIMPLIFY",
               calculate_area=True, area_unit="SQUARE_METERS"):
    """Convert an integer raster to a polygon shapefile.

    Parameters
    ----------
    raster_name : str
        Full path to an INTEGER raster.
    out_shp_name : str
        Output shapefile path; defaults to *raster_name* with ".shp".
    simplify : str
        "SIMPLIFY" or "NO_SIMPLIFY" polygon edges.
    calculate_area : bool
        If True, add an F_AREA field with each polygon's area.
    area_unit : str
        Unit keyword for CalculateGeometryAttributes (new parameter,
        default "SQUARE_METERS").

    Returns
    -------
    str
        Path of the created shapefile.
    """
    if not out_shp_name:
        out_shp_name = raster_name.split(".")[0] + ".shp"
    arcpy.CheckOutExtension('Spatial')
    arcpy.RasterToPolygon_conversion(Int(arcpy.Raster(raster_name)),
                                     out_shp_name, simplify)
    if calculate_area:
        arcpy.AddField_management(out_shp_name, "F_AREA", "FLOAT", 9)
        # BUG FIX: area_unit previously received out_shp_name (a file
        # path), which is not a valid unit keyword.
        arcpy.CalculateGeometryAttributes_management(
            out_shp_name,
            geometry_property=[["F_AREA", "AREA"]],
            area_unit=area_unit)
    arcpy.CheckInExtension('Spatial')
    return out_shp_name
def network_with_facility(facility_mode='bikeway', level='regionwide'):
    """Sum the geodesic mileage of network segments near a facility type.

    Selects network segments within 70 feet of facilities of the given
    mode, optionally restricted to an analysis area, and returns the total
    length in miles (rounded to 2 decimals).

    Relies on module-level names ``facilities``, ``network``, ``path`` and
    ``pd`` -- presumably a facilities feature class with a "mode" field, the
    street network, a data directory, and pandas; verify against the rest
    of the module.

    Parameters
    ----------
    facility_mode : str
        Value matched against the facilities' "mode" field (also used as
        the in-memory layer name).
    level : str
        'regionwide', 'equity-focused areas', '1/4 miles from transit
        stops', or anything else (treated as high-frequency transit).

    Returns
    -------
    float
        Total Length_mi of the selected segments, rounded to 2 places.
    """
    # Layer of facilities restricted to the requested mode.
    arcpy.MakeFeatureLayer_management(facilities, facility_mode,
                                      "mode = '{0}'".format(facility_mode))
    arcpy.MakeFeatureLayer_management(network, "network")
    # Geodesic length in miles for every network segment.
    arcpy.CalculateGeometryAttributes_management(
        "network", [["Length_mi", "LENGTH_GEODESIC"]], "MILES_US")
    if level == 'regionwide':
        # Whole region: just segments near the facility.
        arcpy.SelectLayerByLocation_management("network", "INTERSECT",
                                               facility_mode, "70 Feet")
    elif level == 'equity-focused areas':
        # First narrow to the equity area, then subset to facility proximity.
        equity_area = os.path.join(path,
                                   'service_transit_equity/equity_area.shp')
        arcpy.MakeFeatureLayer_management(equity_area, "equity_area")
        arcpy.SelectLayerByLocation_management("network", "INTERSECT",
                                               "equity_area")
        arcpy.SelectLayerByLocation_management("network", "INTERSECT",
                                               facility_mode, "70 Feet",
                                               "SUBSET_SELECTION")
    else:
        # Transit-based levels: pick the stop layer, then select segments
        # within a quarter mile of it before subsetting by facility.
        if level == '1/4 miles from transit stops':
            transit_stops = os.path.join(
                path, 'service_transit_equity/transit_stops.shp')
            arcpy.MakeFeatureLayer_management(transit_stops, "transit_stops")
        else:
            high_freq_transit = os.path.join(
                path, 'service_transit_equity/high_frequency_transit.shp')
            arcpy.MakeFeatureLayer_management(high_freq_transit,
                                              "transit_stops")
        arcpy.SelectLayerByLocation_management("network",
                                               "WITHIN_A_DISTANCE_GEODESIC",
                                               "transit_stops", "0.25 Miles")
        arcpy.SelectLayerByLocation_management("network", "INTERSECT",
                                               facility_mode, "70 Feet",
                                               "SUBSET_SELECTION")
    # Sum Length_mi over the current selection into a stats table.
    arcpy.Statistics_analysis("network", "network_w_" + facility_mode,
                              [["Length_mi", "SUM"]])
    # Read the one-row stats table into pandas and return the sum.
    fields = [f.name for f in arcpy.ListFields("network_w_" + facility_mode)]
    arr = arcpy.da.TableToNumPyArray("network_w_" + facility_mode, fields)
    table = pd.DataFrame(arr, columns=fields)
    return round(table['SUM_Length_mi'].values[0], 2)
def addFields():
    """Add FILE_NAME, ACRES and CULM_ACRES fields to every feature class in
    Daily_Coverage.gdb and populate the name and acreage values.

    Returns:
        None; the feature classes are modified in place.
    """
    arcpy.env.workspace = r'C:\Users\JBurton_AOR\Documents\ArcGIS\Projects\Oahu\Daily_Coverage.gdb'
    arcpy.env.overwriteOutput = 'True'

    feature_classes = arcpy.ListFeatureClasses()
    print(feature_classes)

    # Field that will receive the acreage value.
    area_field = 'ACRES'

    for feature_class in feature_classes:
        print('processing' + " " + feature_class)

        # One text field for the source name, two float fields for acreage.
        new_fields = (('FILE_NAME', 'TEXT'), ('ACRES', 'FLOAT'),
                      ('CULM_ACRES', 'FLOAT'))
        for field_name, field_type in new_fields:
            arcpy.AddField_management(feature_class, field_name, field_type)
        print('added fields')

        # Planar area in acres derived from the polygon geometry.
        arcpy.CalculateGeometryAttributes_management(
            feature_class, [[area_field, 'AREA']], area_unit='Acres')

        # Stamp every row with the quoted feature-class name.
        name_literal = '"' + str(feature_class) + '"'
        arcpy.CalculateField_management(feature_class, 'FILE_NAME',
                                        name_literal, "PYTHON")
        print('calculated fields')
def filter_pols(pol, name):
    """Remove thin sliver polygons from *pol*.

    Buffers the polygons inward by 2 m, computes area and perimeter, deletes
    features whose area-to-perimeter ratio is below 0.5, and copies the
    survivors to '<name>_filt.shp'.

    Parameters
    ----------
    pol : feature class / layer
        Input polygons.
    name : str
        Prefix for the output shapefile name.

    Returns
    -------
    Result
        CopyFeatures result pointing at '<name>_filt.shp'.
    """
    # Inward buffer polygons by 2 m
    pol_in = arcpy.Buffer_analysis(pol, 'pol_in.shp', '-2 METERS')
    # Calculate geometric attributes
    arcpy.CalculateGeometryAttributes_management(
        pol_in, [['area', 'AREA'], ['perim', 'PERIMETER_LENGTH']], 'METERS',
        'SQUARE_METERS')
    # BUG FIX: the update cursor was never closed, leaving a schema lock on
    # the shapefile; a `with` block releases it even on error.
    with arcpy.da.UpdateCursor(pol_in, ['area', 'perim']) as cursor:
        for row in cursor:
            # Thin polygons have a low area-to-perimeter ratio.
            if row[0] / row[1] < 0.5:
                cursor.deleteRow()
    pol_out = arcpy.CopyFeatures_management(pol_in, name + '_filt.shp')
    # Drop the intermediate reference.
    del pol_in
    return pol_out
def get_polygon_coord(infeature):
    """Return the WGS-84 bounding box of *infeature*.

    Projects the feature class from CGCS2000 3-degree Gauss-Kruger zone 35
    to geographic WGS-84, writes each feature's extent corners into four
    fields, aggregates them with a statistics table, and returns the overall
    box as [left, top, right, bottom] (degrees).

    Side effects: creates '<infeature>tempt' (projected copy) and
    '<infeature>tempttable' (statistics table) in the current workspace.
    """
    temptfeature = infeature + 'tempt'
    tempttable = infeature + 'tempttable'
    # Reproject to the geodetic (WGS-84) coordinate system so extents come
    # out in degrees of longitude/latitude.
    arcpy.Project_management(
        in_dataset=infeature,
        out_dataset=temptfeature,
        out_coor_system=
        "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]",
        transform_method=[],
        in_coor_system=
        "PROJCS['CGCS2000_3_Degree_GK_Zone_35',GEOGCS['GCS_China_Geodetic_Coordinate_System_2000',DATUM['D_China_2000',SPHEROID['CGCS2000',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Gauss_Kruger'],PARAMETER['False_Easting',35500000.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',105.0],PARAMETER['Scale_Factor',1.0],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]",
        preserve_shape="NO_PRESERVE_SHAPE",
        max_deviation="",
        vertical="NO_VERTICAL")
    # Fields to hold each feature's extent corners.
    arcpy.AddFields_management(
        in_table=temptfeature,
        field_description=[["Left", "DOUBLE", "", "", "", ""],
                           ["Bottom", "DOUBLE", "", "", "", ""],
                           ["Right", "DOUBLE", "", "", "", ""],
                           ["Top", "DOUBLE", "", "", "", ""]])
    # Compute the longitude/latitude of the four extent corners.
    arcpy.CalculateGeometryAttributes_management(
        temptfeature, [["Left", "EXTENT_MIN_X"], ["Bottom", "EXTENT_MIN_Y"],
                       ["Right", "EXTENT_MAX_X"], ["Top", "EXTENT_MAX_Y"]])
    # Aggregate to the overall bounding box (one-row statistics table).
    arcpy.Statistics_analysis(in_table=temptfeature,
                              out_table=tempttable,
                              statistics_fields=[["Left", "MIN"],
                                                 ["Bottom", "MIN"],
                                                 ["Right", "MAX"],
                                                 ["Top", "MAX"]],
                              case_field=[])
    cursor = arcpy.SearchCursor(tempttable)
    original_coord = []
    # The statistics table has a single row; read the min/max values.
    for row in cursor:
        leftmin = row.MIN_Left
        rightmax = row.MAX_Right
        bottommin = row.MIN_Bottom
        topmax = row.MAX_Top
        original_coord = [leftmin, topmax, rightmax, bottommin]
    return original_coord
def calculateRoadSegmentsInPolygon(inputGeodatabase, featureClassA,
                                   featureClassB):
    """Total length (metres) of featureClassB line segments in featureClassA.

    Intersects the polyline feature class B with the polygon feature class A,
    computes the geodesic length of every resulting segment, and summarizes
    the total into a statistics table.

    Parameters
    ----------
    inputGeodatabase : str
        Workspace containing both feature classes.
    featureClassA : str
        Polygon feature class.
    featureClassB : str
        Polyline feature class (e.g. roads).

    Returns
    -------
    Result
        The Statistics_analysis result (table "length_summary").
    """
    import arcpy

    # create output file with the same name as existing file by overwriting it
    arcpy.env.overwriteOutput = True
    # set workspace to user input geodatabase
    arcpy.env.workspace = inputGeodatabase

    # calculate geometric intersection of the lines with the polygons
    intersection = "B_Intersects_A"
    arcpy.Intersect_analysis([featureClassB, featureClassA], intersection)

    # BUG FIX: the length field belongs on the intersection output (adding
    # it to featureClassB beforehand was unnecessary and left a stray field
    # on the input data).
    arcpy.AddField_management(intersection, "total_length", "DOUBLE")

    # calculate total geodesic length in meters for each intersected segment
    arcpy.CalculateGeometryAttributes_management(
        intersection, [["total_length", "LENGTH_GEODESIC"]], "METERS")

    # BUG FIX: Statistics_analysis takes (in_table, out_table,
    # [[field, statistic]]); the original passed the input/output swapped
    # and the field/statistic as separate positional strings.
    summary = arcpy.Statistics_analysis(intersection, "length_summary",
                                        [["total_length", "SUM"]])
    return summary
def calculate_point_per_poly(shape_file):
    """Distribute a sampling-point budget across polygons by area share.

    Adds a 'Temp_Area' field (geodesic square metres) and a 'SampleSize'
    field, then assigns each polygon a share of the total point budget
    proportional to its area.

    Relies on module-level names out_cor, CheckBox_pointfile, point,
    Accuracy, Error_Margin and Point_file -- presumably script-tool
    parameters; verify against the surrounding script.

    Parameters
    ----------
    shape_file : str
        Polygon feature class/shapefile modified in place.
    """
    total_area = 0
    arcpy.AddMessage('Generating area for features...')
    arcpy.AddField_management(shape_file,
                              field_name='Temp_Area',
                              field_type='DOUBLE')
    # Geodesic area in square metres in the out_cor coordinate system.
    arcpy.CalculateGeometryAttributes_management(
        shape_file,
        geometry_property=[['Temp_Area', 'AREA_GEODESIC']],
        area_unit='SQUARE_METERS',
        coordinate_system=out_cor)
    # get the total area
    arcpy.AddMessage('Computing the total area...')
    for row in arcpy.da.SearchCursor(shape_file, 'Temp_Area'):
        total_area += row[0]
        #print('Temp_Area',row[0])
    # calculate the sampling size via total area and distribute to each
    # feature according to its area percentage
    arcpy.AddField_management(shape_file,
                              field_name='SampleSize',
                              field_type='SHORT')
    arcpy.AddMessage('Computing the sampling size for each feature...')
    with arcpy.da.UpdateCursor(shape_file,
                               ['Temp_Area', 'SampleSize']) as cursor:
        for row in cursor:
            area_percent = row[0] / total_area
            if CheckBox_pointfile == 'false':
                # Fixed user-specified total: split `point` proportionally.
                row[1] = int(round(int(point) * area_percent))
            elif CheckBox_pointfile == 'true':
                # Sample-size formula at 95% confidence (z = 1.96) with a
                # finite-population correction from the Point_file count --
                # looks like Cochran's formula; confirm against the tool doc.
                n = (1.96 * 1.96 * int(Accuracy) *
                     (100 - int(Accuracy))) / (int(Error_Margin) *
                                               int(Error_Margin))
                Total_point = arcpy.GetCount_management(Point_file)
                total_point = n / (1 + ((n - 1) / int(Total_point[0])))
                row[1] = int(round(total_point * area_percent))
                #print('Sample Size:',row[1])
            cursor.updateRow(row)
def calculateDensity(fcpolygon, attribute, geodatabase="assignment2.gdb"):
    """Compute per-square-mile density of *attribute* for each polygon.

    Validates the workspace and feature class, reports the spatial
    reference, calculates the geodesic area of each polygon in square
    miles, and writes attribute/area into a new density field.

    Parameters
    ----------
    fcpolygon : str
        Polygon feature class inside *geodatabase*.
    attribute : str
        Numeric field to normalize by area.
    geodatabase : str
        Workspace containing the feature class.
    """
    # create output file with the same name as existing file by overwriting it
    arcpy.env.overwriteOutput = True

    # test for existence of the workspace
    if arcpy.Exists(geodatabase):
        arcpy.env.workspace = geodatabase
        print("Environment workspace is set to: ", geodatabase)
    else:
        print("Workspace", geodatabase, "does not exist!")
        sys.exit(1)

    # BUG FIX: the original `try` had no `except` (a SyntaxError) and
    # referenced undefined names (desc1, desc2, desc, fc, areaColumn);
    # the logic below keeps the intent with defined names only.
    try:
        # use the describe function to determine the element data type
        desc_fcpolygon = arcpy.Describe(fcpolygon)
        if desc_fcpolygon.shapeType != "Polygon":
            print("Error shapeType: ", fcpolygon,
                  "needs to be a polygon type!")
            sys.exit(1)

        spatial_ref = desc_fcpolygon.spatialReference
        if spatial_ref.name == "Unknown":
            print("{} has an unknown spatial reference".format(fcpolygon))
        # BUG FIX: `== "WGS84" or "NAD83"` was always truthy; use a
        # membership test so the else branch is reachable.
        elif spatial_ref.name in ("WGS84", "NAD83"):
            print("{} has a geographic projection as spatial reference!".
                  format(fcpolygon))
        else:
            print("{} : {}".format(fcpolygon, spatial_ref.name))

        # Calculate the geodesic area of each polygon in square miles.
        arcpy.AddField_management(fcpolygon, "area_sqmi", "DOUBLE")
        arcpy.CalculateGeometryAttributes_management(
            fcpolygon, [["area_sqmi", "AREA_GEODESIC"]],
            area_unit="SQUARE_MILES_US")

        # density = attribute / area (field-calculator expression; the
        # original divided a parameter string by an undefined name).
        arcpy.AddField_management(fcpolygon, "density_sqm", "DOUBLE")
        expression = "!{}! / !area_sqmi!".format(attribute)
        arcpy.management.CalculateField(fcpolygon, "density_sqm", expression,
                                        "PYTHON3")
    except arcpy.ExecuteError:
        # surface geoprocessing errors instead of crashing silently
        print(arcpy.GetMessages(2))
--------------------------------------------------------------------------------""" # Import modules import arcpy from arcpy import env inGDB = arcpy.GetParameterAsText(0) # Set workspace and environment variables arcpy.env.workspace = inGDB arcpy.env.overwriteOutput = True arcpy.AddMessage("Searching the geodatabase now...") fcList = arcpy.ListFeatureClasses("*", "point") fcCount = len(fcList) arcpy.AddMessage( "{0} application points layers in this geodatabase.".format(fcCount)) # Use list to add fields and calculate XY for fc in fcList: arcpy.management.AddFields(fc, [['Updated_X', 'DOUBLE', 'Updated X'], ['Updated_Y', 'DOUBLE', 'Updated Y']]) arcpy.CalculateGeometryAttributes_management( fc, [["Updated_X", "POINT_X"], ["Updated_Y", "POINT_Y"]]) arcpy.AddMessage("Fields added and geometry calulated for {0}".format(fc))
def main(best_plant_dir=str(), lf_dir=str(), crit_lf=float(), prj_name=str(),
         unit=str(), version=str()):
    """Derive and draw stabilizing features for vegetation plantings.

    Args:
        best_plant_dir: directory of best-lifespan plant rasters (only used
            in a log message here -- TODO confirm intended use; the wood
            raster is actually read from lf_dir).
        lf_dir: directory holding bioengineering lifespan rasters
            (lf_wood.tif / lf_bio_v_bio.tif).
        crit_lf: years of minimum plant survival without stabilization
            (e.g. 2.5).
        prj_name: project name corresponding to the folder name (e.g. "TBR").
        unit: "us" or "si"; selects area units and the acre conversion.
        version: 3-char version string "vII" (e.g. "v10").

    Returns:
        -1 on any failure; None on success (results are written to disk:
        a plant_stab.tif raster, a Plant_stab.shp shapefile, a
        plant_stab.txt table and an assessment workbook).
    """
    logger = logging.getLogger("logfile")
    logger.info("STABILIZING PLANTS ----- ----- ----- -----")
    # Unit handling: US customary measures areas in ft2 and later converts
    # to acres; otherwise square metres with no conversion factor.
    if unit == "us":
        area_units = "SQUARE_FEET_US"
        ft2_to_acres = config.ft2ac
    else:
        area_units = "SQUARE_METERS"
        ft2_to_acres = 1.0
    arcpy.CheckOutExtension('Spatial')
    arcpy.gp.overwriteOutput = True
    dir2pp = config.dir2pm + prj_name + "_" + version + "\\"
    # folder settings
    ras_dir = dir2pp + "Geodata\\Rasters\\"
    shp_dir = dir2pp + "Geodata\\Shapefiles\\"
    quant_dir = dir2pp + "Quantities\\"
    # file and variable settings
    xlsx_target = dir2pp + prj_name + "_assessment_" + version + ".xlsx"
    # Map of stabilization feature name -> integer raster code.
    feature_dict = {
        "Large wood": 211,
        "ELJs (plantings)": 212,
        "Bioengineering (veget.)": 213,
        "Bioengineering (mineral)": 214,
        "Angular boulders (instream)": 215
    }

    # LOOK UP INPUT RASTERS
    try:
        logger.info("Looking up maximum lifespan rasters ...")
        max_lf_plants = arcpy.Raster(ras_dir + "max_lf_pl_c.tif")
        logger.info(" >> Vegetation plantings OK.")
        logger.info(" -- OK (MaxLifespan raster read)\n")
    except:
        logger.info("ERROR: Could not find max. lifespan Rasters.")
        return -1

    logger.info("Looking up specific bioengineering lifespan rasters ...")
    logger.info(best_plant_dir + "lf_wood.tif")
    try:
        lf_wood = arcpy.Raster(lf_dir + "lf_wood.tif")
        logger.info(" >> Added Streamwood.")
    except:
        # Missing raster: fall back to a constant 0-lifespan surface.
        lf_wood = Float(0)
        logger.info(
            "WARNING: Could not find Lifespan Raster (%slf_wood.tif)." %
            lf_dir)
        logger.info(
            " > Go to the Lifespan Tab and create lifespan rasters for the Bioengineering feature group."
        )
        logger.info(" > Applying 0-lifespans instead.")
    try:
        lf_bio = arcpy.Raster(lf_dir + "lf_bio_v_bio.tif")
        logger.info(" >> Added Other bioengineering.")
    except:
        # Missing raster: fall back to a constant 0-lifespan surface.
        lf_bio = Float(0)
        logger.info(
            "WARNING: Could not find Lifespan Raster (%slf_bio_v_bio.tif)." %
            lf_dir)
        logger.info(
            " > Go to the Lifespan Tab and create lifespan rasters for the Bioengineering feature group."
        )
        logger.info(" > Applying 0-lifespans instead.")
    logger.info(" -- OK (Bioengineering raster read)")

    # EVALUATE BEST STABILIZATION FEATURES
    try:
        logger.info("Assessing best features for plant stabilization.")
        arcpy.env.extent = max_lf_plants.extent
        # Where plantings alone do not survive crit_lf years, pick a feature
        # code: wood (or ELJs) where a wood raster exists, else
        # bioengineering (vegetative or mineral), else angular boulders.
        best_stab = Con(
            max_lf_plants <= crit_lf,
            Con(
                ~IsNull(lf_wood),
                Con(lf_wood > crit_lf, Int(feature_dict["Large wood"]),
                    Int(feature_dict["ELJs (plantings)"])),
                Con(
                    ~IsNull(lf_bio),
                    Con(lf_bio > crit_lf,
                        Int(feature_dict["Bioengineering (veget.)"]),
                        Int(feature_dict["Bioengineering (mineral)"])),
                    Int(feature_dict["Angular boulders (instream)"]))))
        logger.info(" -- OK (Stabilization assessment.)\n")
    except:
        logger.info("ERROR: Best stabilization assessment failed.")
        return -1

    # SAVE RASTERS
    try:
        logger.info("Saving results raster as " + ras_dir + "plant_stab.tif")
        best_stab.save(ras_dir + "plant_stab.tif")
        logger.info(" -- OK (Raster saved.)\n")
    except:
        logger.info("ERROR: Result geofile saving failed.")
        return -1

    # SHAPEFILE CONVERSION AND STATS
    try:
        logger.info("Extracting quantities from geodata ...")
        logger.info(" >> Converting results raster to polygon shapefile ...")
        p_stab_shp = shp_dir + "Plant_stab.shp"
        try:
            arcpy.RasterToPolygon_conversion(Int(best_stab), p_stab_shp,
                                             "NO_SIMPLIFY")
            if not fGl.verify_shp_file(p_stab_shp):
                logger.info(
                    "NO STABILIZATION MEASURE IDENTIFIED (EMPTY: %s)." %
                    p_stab_shp)
                logger.info(fGl.open_file(xlsx_target))
                return -1
        except:
            logger.info(
                "NOTHING TO DO. Consider to increase the critical lifespan threshold."
            )
        logger.info(" >> Calculating area statistics ... ")
        try:
            arcpy.AddField_management(p_stab_shp, "F_AREA", "FLOAT", 9)
        except:
            logger.info(
                " * field F_AREA already exists or the dataset is opened by another software."
            )
        try:
            arcpy.CalculateGeometryAttributes_management(
                p_stab_shp,
                geometry_property=[["F_AREA", "AREA"]],
                area_unit=area_units)
        except:
            logger.info(" * no plant stabilization applicable ")
        logger.info(" >> Adding field (stabilizing feature) ... ")
        try:
            arcpy.AddField_management(p_stab_shp, "Stab_feat", "TEXT")
        except:
            logger.info(" * field Stab_feat already exists ")
        logger.info(" >> Evaluating field (stabilizing feature) ... ")
        # Reverse lookup (code value -> feature name), injected into the
        # field calculator as a code block string.
        inv_feature_dict = {v: k for k, v in feature_dict.items()}
        code_block = "inv_feature_dict = " + str(inv_feature_dict)
        try:
            arcpy.CalculateField_management(p_stab_shp, "Stab_feat",
                                            "inv_feature_dict[!gridcode!]",
                                            "PYTHON", code_block)
        except:
            logger.info(" * no plant stabilization added ... ")
        logger.info(" >> Exporting tables ...")
        arcpy.TableToTable_conversion(p_stab_shp, quant_dir, "plant_stab.txt")
        logger.info(" -- OK (Quantity export)\n")
    except:
        logger.info("ERROR: Shapefile operations failed.")
        return -1

    # PREPARE AREA DATA (QUANTITIES)
    logger.info("Processing table statistics ...")
    write_dict = {}
    for k in feature_dict.keys():
        write_dict.update({k: 0.0})  # set to zero for surface count
    stat_data = fGl.read_txt(quant_dir + "plant_stab.txt")
    logger.info(" >> Extracting relevant area sizes ...")
    # Accumulate the exported area per feature code; unknown codes are
    # logged and skipped.
    for row in stat_data:
        try:
            write_dict[inv_feature_dict[int(row[0])]] += row[1]
        except:
            logger.info(" --- Unknown key: " + str(int(row[0])))
    if unit == "us":
        logger.info(" >> Converting ft2 to acres ...")
        for k in write_dict.keys():
            write_dict[k] = write_dict[k] * float(ft2_to_acres)
    logger.info(" -- OK (Area extraction finished)\n")

    # WRITE AREA DATA TO EXCEL FILE
    logger.info("Writing results to costs workbook (sheet: from_geodata) ...")
    fGl.write_dict2xlsx(write_dict, xlsx_target, "B", "C", 12)

    # CLEAN UP useless shapefiles
    logger.info("Cleaning up redundant shapefiles ...")
    arcpy.env.workspace = shp_dir
    all_shps = arcpy.ListFeatureClasses()
    for shp in all_shps:
        if "_del" in str(shp):
            try:
                arcpy.Delete_management(shp)
            except:
                logger.info(
                    str(shp) +
                    " is locked. Remove manually to avoid confusion.")
    arcpy.env.workspace = dir2pp + "Geodata\\"
    logger.info(" -- OK (Clean up)\n")
def Define_HRU_Attributes_arcgis(
        prj_crs,
        trg_crs,
        hruinfo,
        dissolve_filedname_list,
        Sub_ID,
        Landuse_ID,
        Soil_ID,
        Veg_ID,
        Other_Ply_ID_1,
        Other_Ply_ID_2,
        Landuse_info_data,
        Soil_info_data,
        Veg_info_data,
        DEM,
        Path_Subbasin_Ply,
        Inmportance_order,
        min_hru_area_pct_sub,
        OutputFolder,
        tempfolder,
):
    """Generate attributes of each HRU.

    Generates attributes needed by Raven and other hydrological models for
    each HRU: land-use / soil / vegetation classes, geodesic area, centroid
    coordinates, and (when a DEM is given) mean slope, aspect and elevation.

    Parameters
    ----------
    prj_crs : int or str
        EPSG code of a projected coordinate system used to calculate HRU
        area and slope.
    trg_crs :
        Target CRS -- unused in this function body; TODO confirm it is
        needed by the interface.
    hruinfo : spatial DataFrame
        Polygon layer produced by overlaying all input polygons.
    dissolve_filedname_list : list
        Column names of the ID field in each overlaid polygon layer --
        unused here; presumably consumed by callers of the shared interface.
    Sub_ID : str
        Column name of the subbasin id in the subbasin polygon.
    Landuse_ID : str
        Column name (polygon and Landuse info table) of the landuse ID.
        Without a landuse polygon the ID should be 1: land, -1: lake.
    Soil_ID : str
        Column name (polygon and soil info table) of the soil ID. Without a
        soil polygon the Soil ID should equal the Landuse ID.
    Veg_ID : str
        Column name (polygon and veg info table) of the vegetation ID.
        Without a vegetation polygon the Veg ID should equal the Landuse ID.
    Other_Ply_ID_1, Other_Ply_ID_2 : str
        ID columns of two optional additional polygon layers.
    Landuse_info_data : DataFrame
        Landuse table with Landuse_ID and LAND_USE_C (class name) columns.
    Soil_info_data : DataFrame
        Soil table with Soil_ID and SOIL_PROF (profile name) columns.
    Veg_info_data : DataFrame
        Vegetation table with Veg_ID and VEG_C (class name) columns.
    DEM : str
        Path to a raster elevation dataset used for mean aspect, elevation
        and slope per HRU; "#" means none, in which case subbasin averages
        (BasSlope / BasAspect / MeanElev columns) are used.
    Path_Subbasin_Ply : str
        Path of the subbasin polygon (SubId, DowSubId, IsLake, IsObs,
        river/lake attributes as documented by the toolbox).
    Inmportance_order : list
        Priority order used when merging small HRUs (note: parameter name
        spelling is part of the public interface).
    min_hru_area_pct_sub : float
        Minimum HRU area as a percentage of its subbasin; smaller HRUs are
        merged away.
    OutputFolder : str
        Folder for result rasters (slope.tif / aspect.tif).
    tempfolder : str
        Scratch folder for intermediate shapefiles and tables.

    Returns
    -------
    spatial DataFrame
        HRU polygons with all attributes required by hydrological models
        such as RAVEN.
    """
    # random tag -- unused below; presumably a leftover naming helper.
    num = str(np.random.randint(1, 10000 + 1))

    # Initialize placeholder attribute columns; filled by later steps.
    hruinfo["LAND_USE_C"] = '-9999'
    hruinfo["VEG_C"] = '-9999'
    hruinfo["SOIL_PROF"] = '-9999'
    hruinfo["HRU_CenX"] = -9999.9999
    hruinfo["HRU_CenY"] = -9999.9999
    hruinfo["HRU_ID_New"] = -9999
    hruinfo["HRU_Area"] = -9999.99

    # Write the HRU spatial dataframe to a shapefile so arcpy can work on it.
    hruinfo.spatial.to_featureclass(location=os.path.join(
        tempfolder, 'hru_add_area.shp'),
                                    overwrite=True,
                                    sanitize_columns=False)
    # Geodesic area (m2) of each feature, evaluated in the projected CRS.
    arcpy.CalculateGeometryAttributes_management(
        os.path.join(tempfolder, 'hru_add_area.shp'),
        [["HRU_Area", "AREA_GEODESIC"]],
        area_unit='SQUARE_METERS',
        coordinate_system=arcpy.SpatialReference(prj_crs))

    # calculate area of each feature
    hruinfo_area = pd.DataFrame.spatial.from_featureclass(
        os.path.join(tempfolder, 'hru_add_area.shp'))
    # HRU ids are 1-based feature ids.
    hruinfo_area['HRU_ID'] = hruinfo_area['FID'] + 1
    hruinfo_area["HRU_ID_New"] = hruinfo_area["FID"] + 1

    # Attach landuse / soil / vegetation class attributes to each HRU.
    hruinfo_area_update_attribute = Determine_HRU_Attributes(
        hruinfo_area,
        Sub_ID,
        Landuse_ID,
        Soil_ID,
        Veg_ID,
        Other_Ply_ID_1,
        Other_Ply_ID_2,
        Landuse_info_data,
        Soil_info_data,
        Veg_info_data,
    )
    save_modified_attributes_to_outputs(
        mapoldnew_info=hruinfo_area_update_attribute,
        tempfolder=tempfolder,
        OutputFolder=tempfolder,
        cat_name='finalcat_hru_info.shp',
        riv_name='#',
        Path_final_riv='#',
        dis_col_name='HRU_ID_New')

    arcpy.RepairGeometry_management(
        os.path.join(tempfolder, 'finalcat_hru_info.shp'))
    # Recompute areas after the dissolve/save step.
    arcpy.CalculateGeometryAttributes_management(
        os.path.join(tempfolder, 'finalcat_hru_info.shp'),
        [["HRU_Area", "AREA_GEODESIC"]],
        area_unit='SQUARE_METERS',
        coordinate_system=arcpy.SpatialReference(prj_crs))

    hruinfo_new = pd.DataFrame.spatial.from_featureclass(
        os.path.join(tempfolder, 'finalcat_hru_info.shp'))
    # Merge away HRUs smaller than min_hru_area_pct_sub percent of their
    # subbasin, following the supplied importance order.
    hruinfo_simple = simplidfy_hrus(
        min_hru_pct_sub_area=min_hru_area_pct_sub,
        hruinfo=hruinfo_new,
        importance_order=Inmportance_order,
    )
    save_modified_attributes_to_outputs(mapoldnew_info=hruinfo_simple,
                                        tempfolder=tempfolder,
                                        OutputFolder=tempfolder,
                                        cat_name='hru_simple.shp',
                                        riv_name='#',
                                        Path_final_riv='#',
                                        dis_col_name='HRU_ID_New')

    # Centroid coordinates in geographic WGS-84 (EPSG:4326).
    arcpy.CalculateGeometryAttributes_management(
        os.path.join(tempfolder, 'hru_simple.shp'),
        [["HRU_CenX", "CENTROID_X"], ["HRU_CenY", "CENTROID_Y"]],
        coordinate_system=arcpy.SpatialReference(4326))
    #
    # Join the subbasin attributes onto the simplified HRUs by SubId.
    arcpy.JoinField_management(
        os.path.join(tempfolder, 'hru_simple.shp'),
        'SubId',
        os.path.join(Path_Subbasin_Ply),
        'SubId',
    )
    # Final area recomputation on the simplified HRUs.
    arcpy.CalculateGeometryAttributes_management(
        os.path.join(tempfolder, 'hru_simple.shp'),
        [["HRU_Area", "AREA_GEODESIC"]],
        area_unit='SQUARE_METERS',
        coordinate_system=arcpy.SpatialReference(prj_crs))

    if DEM != "#":
        # DEM provided: derive mean slope/aspect/elevation per HRU via
        # zonal statistics in the projected CRS.
        arcpy.Project_management(
            os.path.join(tempfolder, 'hru_simple.shp'),
            os.path.join(tempfolder, "hru_proj.shp"),
            arcpy.SpatialReference(int(prj_crs)),
        )
        extract_dem = ExtractByMask(DEM,
                                    os.path.join(tempfolder,
                                                 'hru_simple.shp'))
        arcpy.ProjectRaster_management(
            extract_dem, os.path.join(tempfolder, "demproj.tif"),
            arcpy.SpatialReference(int(prj_crs)), "NEAREST")
        # NOTE(review): 0.3043 is the z-factor passed to Slope -- presumably
        # a degree-to-metre correction; confirm against the project setup.
        Slopeout = Slope(extract_dem, "DEGREE", 0.3043)
        Slopeout.save(os.path.join(OutputFolder, 'slope.tif'))
        Aspectout = Aspect(extract_dem)
        # Save the output
        Aspectout.save(os.path.join(OutputFolder, 'aspect.tif'))
        table_zon_slope = ZonalStatisticsAsTable(
            os.path.join(tempfolder, "hru_proj.shp"),
            'HRU_ID_New',
            Slopeout,
            os.path.join(tempfolder, "slope_zonal.dbf"),
            "DATA",
            "MEAN",
        )
        table_zon_aspect = ZonalStatisticsAsTable(
            os.path.join(tempfolder, "hru_proj.shp"),
            'HRU_ID_New',
            Aspectout,
            os.path.join(tempfolder, "asp_zonal.dbf"),
            "DATA",
            "MEAN",
        )
        table_zon_elev = ZonalStatisticsAsTable(
            os.path.join(tempfolder, "hru_proj.shp"),
            'HRU_ID_New',
            os.path.join(tempfolder, "demproj.tif"),
            os.path.join(tempfolder, "elv_zonal.dbf"),
            "DATA",
            "MEAN",
        )
        hruinfo_add_slp_asp = pd.DataFrame.spatial.from_featureclass(
            os.path.join(tempfolder, 'hru_simple.shp'))
        # Read the zonal .dbf tables and rename MEAN to per-quantity columns.
        table_slp = Dbf_To_Dataframe(
            os.path.join(tempfolder, "slope_zonal.dbf"))
        table_asp = Dbf_To_Dataframe(os.path.join(tempfolder,
                                                  "asp_zonal.dbf"))
        table_elv = Dbf_To_Dataframe(os.path.join(tempfolder,
                                                  "elv_zonal.dbf"))
        table_slp['HRU_S_mean'] = table_slp['MEAN']
        table_slp = table_slp[['HRU_ID_New', 'HRU_S_mean']]
        table_asp['HRU_A_mean'] = table_asp['MEAN']
        table_asp = table_asp[['HRU_ID_New', 'HRU_A_mean']]
        table_elv['HRU_E_mean'] = table_elv['MEAN']
        table_elv = table_elv[['HRU_ID_New', 'HRU_E_mean']]
        # Merge the zonal means onto the HRU table by the new HRU id.
        hruinfo_add_slp_asp = pd.merge(hruinfo_add_slp_asp,
                                       table_slp,
                                       on='HRU_ID_New')
        hruinfo_add_slp_asp = pd.merge(hruinfo_add_slp_asp,
                                       table_asp,
                                       on='HRU_ID_New')
        hruinfo_add_slp_asp = pd.merge(hruinfo_add_slp_asp,
                                       table_elv,
                                       on='HRU_ID_New')
        hruinfo_add_slp_asp['HRU_ID'] = hruinfo_add_slp_asp['FID'] + 1
    else:
        # No DEM: fall back to subbasin-average slope/aspect/elevation
        # columns already present on the joined HRU table.
        arcpy.AddMessage(os.path.join(tempfolder, 'hru_simple.shp'))
        hruinfo_add_slp_asp = pd.DataFrame.spatial.from_featureclass(
            os.path.join(tempfolder, 'hru_simple.shp'))
        hruinfo_add_slp_asp['HRU_ID'] = hruinfo_add_slp_asp['FID'] + 1
        hruinfo_add_slp_asp['HRU_S_mean'] = hruinfo_add_slp_asp['BasSlope']
        hruinfo_add_slp_asp['HRU_A_mean'] = hruinfo_add_slp_asp['BasAspect']
        hruinfo_add_slp_asp['HRU_E_mean'] = hruinfo_add_slp_asp['MeanElev']
    return hruinfo_add_slp_asp
#Intersect segment shape file arcpy.FeatureToLine_management(segmentShapeFile, segmentShapeFile2, "0.001 Meters") #Add fields segment shape file, length and id arcpy.AddField_management(in_table=segmentShapeFile2, field_name="Length_m", field_type="DOUBLE") arcpy.AddField_management(in_table=segmentShapeFile2, field_name="Segment_ID", field_type="DOUBLE") #Calculate segment length of splitted traffic shape file arcpy.CalculateGeometryAttributes_management( in_features=segmentShapeFile2, geometry_property=[["Length_m", "LENGTH_GEODESIC"]], length_unit="METERS") #Calculate segment ID arcpy.CalculateField_management(in_table=segmentShapeFile2, field="Segment_ID", expression="!FID!") #Spatial join segment file with traffic mapping file fieldmappings = arcpy.FieldMappings() fieldmappings.addTable(segmentShapeFile2) fieldmappings.addTable(mappingTrafficShapeFile) arcpy.SpatialJoin_analysis(target_features=segmentShapeFile2, join_features=mappingTrafficShapeFile, out_feature_class=segmentShapeFile3, join_operation="JOIN_ONE_TO_ONE",
def calculatePercentAreaOfPolygonAinPolygonB(input_geodatabase, fcPolygon1,
                                             fcPolygon2):
    """Compute how much of each fcPolygon2 feature is covered by fcPolygon1.

    Adds two fields to fcPolygon2: the summed area (sq meters) of fcPolygon1
    falling inside each feature, and that area as a fraction of the feature's
    own area.

    input_geodatabase -- workspace/geodatabase containing the inputs
    fcPolygon1 -- polygon feature class whose area is measured (e.g. parks)
    fcPolygon2 -- polygon feature class to summarize into (e.g. block groups);
                  must carry a field whose name contains "geoid" or "fips"
    """
    desc1 = arcpy.Describe(fcPolygon1)
    desc2 = arcpy.Describe(fcPolygon2)
    desc3 = arcpy.Describe(input_geodatabase)
    # Validate inputs and bail out early. (Previously only the workspace
    # check guarded the main body, so invalid shape types fell through
    # into the calculation.)
    if desc1.shapeType != "Polygon" or desc2.shapeType != "Polygon":
        print("You need to input polygons.")
        return
    if desc3.dataType != "Workspace":
        print("You need to input a geodatabase or workspace.")
        return
    # Intersect so every piece of fcPolygon1 is tagged with the fcPolygon2
    # feature it falls in -- needed to add up e.g. all parks per block group.
    a_intersect_b = "fcPolygon1_intersect_fcPolygon2"
    arcpy.Intersect_analysis([fcPolygon2, fcPolygon1], a_intersect_b)
    # Area of each intersected piece, in square meters.
    # (The AREA_GEODESIC property takes an *area* unit; the original passed
    # "METERS" into the length-unit slot.)
    input_area_field = "fcPolygon1_area_sq_meters"
    arcpy.AddField_management(a_intersect_b, input_area_field, "DOUBLE")
    arcpy.CalculateGeometryAttributes_management(
        a_intersect_b, [[input_area_field, "AREA_GEODESIC"]],
        area_unit="SQUARE_METERS")
    # Locate the GEOID/FIPS field that identifies each fcPolygon2 feature.
    geoid_field = ""
    for field in arcpy.ListFields(fcPolygon2):
        if geoid_field == "":
            if "geoid" in field.name.lower():
                geoid_field = field.name
            elif "fips" in field.name.lower():
                geoid_field = field.name
    if geoid_field == "":
        # Was a bare (no-op) string expression in the original; report and stop.
        print("There is no GEOID or FIPS code in fcPolygon2")
        return
    # Sum the intersected areas per GEOID (dict keys are unique per feature).
    fcPolygon2_dict = {}
    with arcpy.da.SearchCursor(a_intersect_b,
                               [geoid_field, input_area_field]) as cursor:
        for row in cursor:
            geoid = row[0]
            fcPolygon2_dict[geoid] = fcPolygon2_dict.get(geoid, 0) + row[1]
    # Write the summed areas back onto fcPolygon2 (0 where nothing intersects).
    fc1_area_field = "fcPolygon1_area_sq_meters"
    arcpy.AddField_management(fcPolygon2, fc1_area_field, "DOUBLE")
    with arcpy.da.UpdateCursor(fcPolygon2,
                               [geoid_field, fc1_area_field]) as cursor:
        for row in cursor:
            row[1] = fcPolygon2_dict.get(row[0], 0)
            cursor.updateRow(row)
    # Field that will hold the percentage result.
    fc1_pct_field = "fcPolygon1_pct_area_sq_meters"
    arcpy.AddField_management(fcPolygon2, fc1_pct_field, "DOUBLE")
    # Find an existing area field on fcPolygon2, otherwise create one.
    # (The original iterated ListFeatureClasses here, which lists feature
    # classes, not fields.)
    fc2_area_field = ""
    for field in arcpy.ListFields(fcPolygon2):
        if fc2_area_field == "" and "area" in field.name.lower():
            fc2_area_field = field.name
    if fc2_area_field == "":
        arcpy.AddField_management(fcPolygon2, "area_sq_meters", "DOUBLE")
        arcpy.CalculateGeometryAttributes_management(
            fcPolygon2, [["area_sq_meters", "AREA_GEODESIC"]],
            area_unit="SQUARE_METERS")
        fc2_area_field = "area_sq_meters"
    # percent = intersected area / total area, via the field calculator.
    expression = "!" + fc1_area_field + "!/!" + fc2_area_field + "!"
    print(expression)
    arcpy.CalculateField_management(fcPolygon2, fc1_pct_field, expression,
                                    "PYTHON3")
def calculate_hdiff_tomer(name, ras_full, dem_full, mask, tomer):
    """Estimate height difference (hdiff) and volume change for erosion and
    deposition polygons derived from a classified raster.

    name     -- prefix used for all output dataset names
    ras_full -- classified raster (values 1/2 are the change classes kept here)
    dem_full -- DEM raster used for the height statistics
    mask     -- raster-algebra term added onto ras_full before classification
    tomer    -- True applies the stricter filtering path (inner -2 m buffer
                plus an area/perimeter sliver filter); False uses the simple path

    Returns the polygon feature class with 'hdiff' (m) and 'vol_chg' populated.
    """
    ras = ras_full + mask
    #dem = dem_full + mask
    dem = dem_full
    # Keep only change classes 1 and 2, then vectorize them.
    ras_filt = Con(ras, ras, '', 'Value = 1 OR Value = 2')
    pol = arcpy.RasterToPolygon_conversion(ras_filt, name + '_pol.shp',
                                           'NO_SIMPLIFY', 'VALUE')
    arcpy.AddField_management(pol, 'area', 'FLOAT')
    arcpy.AddField_management(pol, 'perim', 'FLOAT')
    arcpy.AddField_management(pol, 'hdiff', 'FLOAT')
    arcpy.AddField_management(pol, 'vol_chg', 'FLOAT')
    if tomer == True:
        # Shrink polygons by 2 m and drop slivers with area/perimeter < 0.5.
        pol_in = arcpy.Buffer_analysis(pol, 'pol_in2.shp', '-2 METERS')
        pol_fl = arcpy.MakeFeatureLayer_management(pol_in, 'pol_ft')
        arcpy.CalculateGeometryAttributes_management(
            pol_fl, [['area', 'AREA'], ['perim', 'PERIMETER_LENGTH']],
            'METERS', 'SQUARE_METERS')
        cursor = arcpy.da.UpdateCursor(pol_fl, ['area', 'perim'])
        for row in cursor:
            if row[0] / row[1] < 0.5:
                cursor.deleteRow()
        del cursor
        pol_out = arcpy.CopyFeatures_management(pol_fl, name + '_filt.shp')
        pol_fl = arcpy.MakeFeatureLayer_management(pol_out, 'pol_ft')
        # gridcode 2 = erosion, gridcode 1 = deposition.
        pol_erd = arcpy.SelectLayerByAttribute_management(
            pol_fl, '', 'gridcode = 2')
        pol_erd_buff = arcpy.Buffer_analysis(pol_erd, name + '_erd_pol.shp',
                                             '5 METERS')
        pol_dep = arcpy.SelectLayerByAttribute_management(
            pol_fl, '', 'gridcode = 1')
        pol_dep_buff = arcpy.Buffer_analysis(pol_dep, name + '_dep_pol.shp',
                                             '2 METERS')
    elif tomer == False:
        pol_out = pol
        arcpy.CalculateField_management(pol_out, 'area', "!SHAPE.AREA!",
                                        'PYTHON')
        pol_fl = arcpy.MakeFeatureLayer_management(pol_out, 'pol')
        pol_erd = arcpy.SelectLayerByAttribute_management(
            pol_fl, '', 'gridcode = 2')
        pol_erd_buff = arcpy.Buffer_analysis(pol_erd, 'erd_pol.shp',
                                             '3 METERS')
        pol_dep = arcpy.SelectLayerByAttribute_management(
            pol_fl, '', 'gridcode = 1')
        pol_dep_buff = arcpy.CopyFeatures_management(pol_dep, 'dep_pol.shp')
    # DEM restricted to the land (0/1) and water (2/3) classes.
    dem_land = Con(ras, dem, '', 'Value = 0 OR Value = 1')
    dem_water = Con(ras, dem, '', 'Value = 2 OR Value = 3')
    # Erosion hdiff = median water height minus median land height per polygon.
    land = pd.DataFrame(
        arcpy.da.TableToNumPyArray(
            ZonalStatisticsAsTable(pol_erd_buff, 'Id', dem_land, 'land_dem',
                                   'DATA', 'MEDIAN'), ['ID', 'MEDIAN']))
    water = pd.DataFrame(
        arcpy.da.TableToNumPyArray(
            ZonalStatisticsAsTable(pol_erd_buff, 'Id', dem_water, 'water_dem',
                                   'DATA', 'MEDIAN'), ['ID', 'MEDIAN']))
    erosion = land.join(water.set_index('ID'),
                        on='ID',
                        how='inner',
                        lsuffix='_L',
                        rsuffix='_W')
    erosion = erosion.assign(hdiff=erosion.MEDIAN_W - erosion.MEDIAN_L)
    # Deposition hdiff = mean of (dem - zonal minimum) per deposition polygon.
    min_ras = ZonalStatistics(pol_dep_buff, 'Id', dem, 'MINIMUM', 'DATA')
    hdiff_dep = dem - min_ras
    deposition = pd.DataFrame(
        arcpy.da.TableToNumPyArray(
            ZonalStatisticsAsTable(pol_dep_buff, 'Id', hdiff_dep, 'hdiff_dep',
                                   'DATA', 'MEAN'), ['ID', 'MEAN']))
    deposition.columns = ['ID', 'hdiff']
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent.
    hdiff = pd.concat([deposition, erosion[['ID', 'hdiff']]])
    hdiff.to_csv(name + 'hdiff.csv')
    ids = list(hdiff['ID'])
    # Write hdiff (converted cm -> m) and volume change back onto the polygons.
    cursor = arcpy.da.UpdateCursor(pol_out, ['Id', 'hdiff', 'area', 'vol_chg'])
    for row in cursor:
        if row[0] in ids:
            hdiff_row = hdiff.loc[hdiff['ID'] == row[0]]
            value = hdiff_row.iloc[0][1]
            row[1] = value / 100
            row[3] = row[1] * row[2]
            cursor.updateRow(row)
    del cursor
    return (pol_out)
out_coor_system= "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]", transform_method=[], in_coor_system= "PROJCS['CGCS2000_3_Degree_GK_Zone_35',GEOGCS['GCS_China_Geodetic_Coordinate_System_2000',DATUM['D_China_2000',SPHEROID['CGCS2000',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Gauss_Kruger'],PARAMETER['False_Easting',35500000.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',105.0],PARAMETER['Scale_Factor',1.0],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]", preserve_shape="NO_PRESERVE_SHAPE", max_deviation="", vertical="NO_VERTICAL") arcpy.AddFields_management( in_table=temptfeature, field_description=[["Left", "DOUBLE", "", "", "", ""], ["Bottom", "DOUBLE", "", "", "", ""], ["Right", "DOUBLE", "", "", "", ""], ["Top", "DOUBLE", "", "", "", ""]]) arcpy.CalculateGeometryAttributes_management( temptfeature, [["Left", "EXTENT_MIN_X"], ["Bottom", "EXTENT_MIN_Y"], ["Right", "EXTENT_MAX_X"], ["Top", "EXTENT_MAX_Y"]]) arcpy.Statistics_analysis(in_table=temptfeature, out_table=tempttable, statistics_fields=[["Left", "MIN"], ["Bottom", "MIN"], ["Right", "MAX"], ["Top", "MAX"]], case_field=[]) cursor = arcpy.SearchCursor(tempttable) for row in cursor: leftmin = row.MIN_Left rightmax = row.MAX_Right bottommin = row.MIN_Bottom topmax = row.MAX_Top
point = output = date = #format Jan01_19 #create line line = arcpy.PointsToLine_management(point, output + "\\" + date, "NAME") print("line created") #split into polyline by vertex line_split = arcpy.SplitLine_management(line, output + "\\" + date + "_s") print("line split") #add length field, calculate length = arcpy.AddField_management(line_split, "length_m", "DOUBLE") print("length field added") arcpy.CalculateGeometryAttributes_management(line_split, [["length_m", "LENGTH_GEODESIC"]], "METERS") print("geometry calculated") ###mark as possible problem prob = arcpy.AddField_management(line_split, "mark", "TEXT") print("mark field added") cursor = arcpy.da.UpdateCursor(line_split, ["length_m", "mark"]) print("cursor created") for updateRow in cursor: if updateRow[0] > 2000: updateRow[1] = 1 elif updateRow[0] <= 2000: updateRow[1] = 0 cursor.updateRow(updateRow)
def main():
    """Rebuild the aggregated tree-crops layer.

    Downloads the TreeCrops editing feature service, aggregates polygons per
    commodity, merges the per-commodity layers with Update_analysis, joins
    attributes back on, computes geodesic hectares, cleans up fields and
    domains, deletes intermediates and derives centroid points.
    """
    arcpy.AddMessage("Setting up workspace and parameters.")
    arcpy.env.overwriteOutput = True
    workspace = r"in_memory"
    arcpy.env.workspace = workspace
    output_date = datetime.datetime.now().strftime("%Y%m%d")
    output = arcpy.GetParameterAsText(0)
    if output == "#" or not output:
        output = r"D:\Projects\TreeProject\TreeProject.gdb\treecrops_{}".format(
            output_date)
    # Split "<workspace>\<fc>" into its two parts.
    output_fc = output.split("\\")[-1]
    output_workspace = output.split(output_fc)[0][:-1]
    print(output_fc)
    print(output_workspace)
    # Create output FC if it doesn't exist (GDA94, EPSG:4283).
    if arcpy.Exists(output):
        pass
    else:
        print("Creating output feature class")
        arcpy.CreateFeatureclass_management(output_workspace, output_fc,
                                            "POLYGON", spatial_reference=4283)
    # For feature service connection
    # SECURITY: hard-coded ArcGIS Online credentials -- move these to script
    # parameters or environment variables and rotate the password.
    # noinspection SpellCheckingInspection
    gis = GIS("http://arcgis.com", "jmckechn_une", "Leoj270592")
    print("Credentials verified: {}".format(gis))
    rest_url = "https://services5.arcgis.com/3foZbDxfCo9kcPwP/arcgis/rest/services/" \
               "TreeCrops_Editing/FeatureServer/0"
    # Copy the editing service to the local gdb, replacing any stale download.
    trees = output_workspace + "\\fs_download_{}".format(output_date)
    if arcpy.Exists(trees):
        print("Removing existing {}".format(trees))
        arcpy.Delete_management(trees)
    # BUG FIX: the download must happen whether or not an old copy existed;
    # previously the copy only ran when no previous download was present.
    print("Copying from service: {}".format(rest_url))
    arcpy.CopyFeatures_management(rest_url, trees)
    print("Copy successful: {}".format(trees))
    # Copy data to memory and set up a filtered feature layer.
    trees_memory = r"in_memory/trees"
    trees_lyr = "trees_lyr"
    query = "(commodity IS NOT NULL AND commodity <> 'other') AND (stage IS NULL OR stage = '1' OR stage = '2')"
    print("Copying data to memory")
    arcpy.CopyFeatures_management(trees, trees_memory)
    arcpy.MakeFeatureLayer_management(trees_memory, trees_lyr,
                                      where_clause=query)
    # Remove leftover ag_* features from a previous run.
    rem_list = arcpy.ListFeatureClasses("ag_*")
    for i in rem_list:
        print("Deleting {}".format(i))
        arcpy.Delete_management(workspace + r"/" + i)
    # Get the unique commodity values present in the layer.
    print("Getting unique attributes from fields")
    field_list = ["commodity", "source", "year"]
    com_list = []
    for i in field_list:
        if i == "commodity":
            u_list = unique_values(trees_lyr, i)
            for j in u_list:
                com_list.append(j)
        else:
            pass
    print(com_list)
    update_list = []
    print("Looping through selecting unique features to aggregate")
    for c in com_list:
        print(" Working on {} features".format(c))
        print(" selecting")
        selection_query = "commodity = '{}'".format(c)
        arcpy.SelectLayerByAttribute_management(trees_lyr, "NEW_SELECTION",
                                                selection_query)
        ag_output = "ag_{}".format(c)
        print(" aggregating")
        arcpy.AggregatePolygons_cartography(trees_lyr, ag_output, "25 METERS",
                                            "1 HECTARES", "1 HECTARES",
                                            "ORTHOGONAL")
        print(" Adding and calculating field")
        arcpy.AddField_management(ag_output, "commodity", "TEXT")
        arcpy.CalculateField_management(ag_output, "commodity",
                                        "'{}'".format(c), "ARCADE")
        print(" created {}".format(ag_output))
        # Copy aggregated features to the output location.
        print(" copying to output location")
        arcpy.CopyFeatures_management(ag_output, output + "_{}".format(c))
        update_list.append(output + "_{}".format(c))
    # Chain the per-commodity layers back together with the Update tool.
    print("Joining features back together with update tool")
    loop_no = len(com_list)
    update_no = 0
    update_output = output + "_update{}".format(update_no)
    print("update_list: {}".format(update_list))
    print("loop_no: {}".format(loop_no))
    print("update_no: {}".format(update_no))
    print("update_output: {}".format(update_output))
    arcpy.CopyFeatures_management(update_list[0], update_output)
    while update_no + 1 <= loop_no:
        loop_name = update_list[update_no].split("{}_".format(output_fc))[-1]
        print(" {} loop ({}/{})".format(loop_name, update_no + 1, loop_no))
        if update_no == 0:
            arcpy.Update_analysis(update_output, update_list[update_no],
                                  output + "_update{}".format(update_no + 1))
            print(" variables: {}, {}, {}".format(
                update_output, update_list[update_no],
                output + "_update{}".format(update_no + 1)))
        else:
            arcpy.Update_analysis(output + "_update{}".format(update_no),
                                  update_list[update_no],
                                  output + "_update{}".format(update_no + 1))
            print(" variables: {}, {}, {}".format(
                output + "_update{}".format(update_no), update_list[update_no],
                output + "_update{}".format(update_no + 1)))
        update_no += 1
    arcpy.CopyFeatures_management(output + "_update{}".format(loop_no), output)
    # Join attributes back to the merged output.
    print("Trying spatial join")
    arcpy.SpatialJoin_analysis(output, trees_memory, output + "_join",
                               "JOIN_ONE_TO_ONE")
    # Add a geodesic hectares field (GDA94).
    arcpy.AddField_management(output + "_join", "hectares", "DOUBLE")
    arcpy.CalculateGeometryAttributes_management(
        output + "_join", [["hectares", "AREA_GEODESIC"]],
        area_unit="HECTARES",
        coordinate_system=4283)
    # Overwrite the output with single-part features.
    print("Explode, and overwriting output")
    arcpy.MultipartToSinglepart_management(output + "_join", output)
    # Clean up join artifacts and editing-only fields.
    join_field_del_list = [
        "Join_Count", "TARGET_FID", "comment", "other", "stage", "edit",
        "Shape__Area", "Shape__Length", "commodity_1", "ORIG_FID", "field",
        "review", "imagery", "industry", "uncertain"
    ]
    print("Deleting the following fields:")
    print(join_field_del_list)
    for i in join_field_del_list:
        arcpy.DeleteField_management(output, i)
    # Assign domains
    print("Assigning domains")
    arcpy.AssignDomainToField_management(output, "source", "source_domain")
    arcpy.AssignDomainToField_management(output, "commodity",
                                         "commodity_domain")
    arcpy.AssignDomainToField_management(output, "year", "year_domain")
    arcpy.env.workspace = output_workspace
    # Delete all working features except the actual output.
    print("Trying to delete unnecessary data")
    del_fc_list = arcpy.ListFeatureClasses("{}_*".format(output_fc))
    print(del_fc_list)
    for i in del_fc_list:
        print("Deleting {}".format(i))
        arcpy.Delete_management(output_workspace + "\\{}".format(i))
    # Derive centroid points.
    print("Creating points")
    arcpy.FeatureToPoint_management(output_fc, output + "_point", "INSIDE")
previous_id = None with arcpy.da.UpdateCursor(start_pts_sorted, ['id']) as cursor: for row in cursor: if row[0] == previous_id: #print("--Current:{}, Previous:{}".format(row[0], previous_id)) cursor.deleteRow() previous_id = row[0] arcpy.DeleteField_management(start_pts, 'Temp_ID') start_pts = start_pts_sorted # add xy coords to start points arcpy.AddField_management(start_pts, field_name="xcoord", field_type='double') arcpy.AddField_management(start_pts, field_name="ycoord", field_type='double') arcpy.CalculateGeometryAttributes_management(start_pts, [["xcoord", "POINT_X"],["ycoord", "POINT_Y"]]) # this might allow this section to run in command line # with arcpy.da.UpdateCursor(start_pts, ['xcoord', 'ycoord', 'SHAPE@X', 'SHAPE@Y']) as cursor: # for row in cursor: # row[0] = row[2] # row[1] = row[3] # cursor.updateRow(row) # create xy key to start points arcpy.AddField_management(start_pts, field_name="XY_Key", field_type='string') arcpy.CalculateField_management(start_pts,"XY_Key",'"!{}!|!{}!"'.format('xcoord', 'ycoord')) # End points print('--generating end points')
def save_modified_attributes_to_outputs(mapoldnew_info,
                                        tempfolder,
                                        OutputFolder,
                                        cat_name,
                                        riv_name,
                                        Path_final_riv,
                                        dis_col_name='SubId'):
    """Dissolve re-attributed catchment polygons into the output folder and,
    when riv_name is given, build the matching dissolved river file.

    mapoldnew_info -- spatially-enabled DataFrame with the updated attributes
    tempfolder     -- scratch folder for intermediate shapefiles
    OutputFolder   -- destination folder for cat_name / riv_name
    cat_name       -- output catchment shapefile name
    riv_name       -- output river shapefile name, or '#' to skip rivers
    Path_final_riv -- source river shapefile joined by SubId (used only when
                      riv_name != '#')
    dis_col_name   -- column to dissolve the catchments on (default 'SubId')
    """
    mapoldnew_info.spatial.to_featureclass(location=os.path.join(
        tempfolder, 'updateattri.shp'),
                                           overwrite=True,
                                           sanitize_columns=False)
    # Dissolve on the id column, then join the full attribute table back on.
    arcpy.Dissolve_management(os.path.join(tempfolder, 'updateattri.shp'),
                              os.path.join(OutputFolder, cat_name),
                              [dis_col_name])
    arcpy.JoinField_management(os.path.join(OutputFolder, cat_name),
                               dis_col_name,
                               os.path.join(tempfolder, 'updateattri.shp'),
                               dis_col_name)
    # Strip the bookkeeping columns produced during the routing updates.
    arcpy.DeleteField_management(os.path.join(OutputFolder, cat_name), [
        "SubId_1", "Id", "nsubid2", "nsubid", "ndownsubid", "Old_SubId",
        "Old_DowSub", "Join_Count", "TARGET_FID", "Id", "SubID_Oldr",
        "HRU_ID_N_1", "HRU_ID_N_2", "facters"
    ])
    if riv_name != '#':
        # WGS84 centroids on the catchments, used to locate subbasin outlets.
        arcpy.CalculateGeometryAttributes_management(
            os.path.join(OutputFolder, cat_name),
            [["centroid_x", "CENTROID_X"], ["centroid_y", "CENTROID_Y"]],
            coordinate_system=arcpy.SpatialReference(4326))
        cat_colnms = mapoldnew_info.columns
        drop_cat_colnms = cat_colnms[cat_colnms.isin([
            "SHAPE", "SubId_1", "Id", "nsubid2", "nsubid", "ndownsubid",
            "Old_DowSub", "Join_Count", "TARGET_FID", "Id", "SubID_Oldr",
            "HRU_ID_N_1", "HRU_ID_N_2", "facters", "Old_DowSubId"
        ])]
        cat_pd = mapoldnew_info.drop(columns=drop_cat_colnms)
        riv_pd = pd.DataFrame.spatial.from_featureclass(Path_final_riv)
        riv_pd['Old_SubId'] = riv_pd['SubId']
        # remove all columns except geometry and the join key
        riv_pd = riv_pd[['SHAPE', 'Old_SubId']]
        riv_pd = pd.merge(riv_pd, cat_pd, on='Old_SubId', how='left')
        riv_pd = riv_pd.drop(columns=['Old_SubId'])
        # Keep only reaches with real length or lakes.
        mask = np.logical_or(riv_pd['RivLength'] > 0, riv_pd['Lake_Cat'] > 0)
        riv_pd = riv_pd[mask]
        # BUG FIX: DataFrame.drop returns a new frame; the original discarded
        # the result, so the centroid columns were never removed here.
        riv_pd = riv_pd.drop(columns=['centroid_x', 'centroid_y'])
        riv_pd.spatial.to_featureclass(location=os.path.join(
            tempfolder, 'riv_attri.shp'),
                                       overwrite=True,
                                       sanitize_columns=False)
        # Dissolve the rivers per SubId and join the attributes back on.
        arcpy.Dissolve_management(os.path.join(tempfolder, 'riv_attri.shp'),
                                  os.path.join(OutputFolder, riv_name),
                                  ["SubId"])
        arcpy.JoinField_management(os.path.join(OutputFolder, riv_name),
                                   "SubId",
                                   os.path.join(tempfolder, 'riv_attri.shp'),
                                   "SubId")
        arcpy.DeleteField_management(os.path.join(OutputFolder, riv_name), [
            "SubId_1", "Id", "nsubid2", "nsubid", "ndownsubid", "Old_SubId",
            "Old_DowSub", "Join_Count", "TARGET_FID", "Id", "SubID_Oldr",
            "HRU_ID_N_1", "HRU_ID_N_2", "facters", 'centroid_x', 'centroid_y'
        ])
arcpy.AddField_management(in_table=shppoint, field_name="LATITUDE", field_type="DOUBLE", field_precision=None, field_scale=None, field_length=None, field_alias="", field_is_nullable="NULLABLE", field_is_required="NON_REQUIRED", field_domain="") arcpy.Integrate_management(in_features=[[shppoint, ""]], cluster_tolerance="2.2 Meters") arcpy.CalculateGeometryAttributes_management( in_features=shppoint, geometry_property=[["LONGITUDE", "POINT_X"], ["LATITUDE", "POINT_Y"]], length_unit="", area_unit="", coordinate_system= "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]" ) with arcpy.EnvManager(XYTolerance="1 Meters"): arcpy.Dissolve_management(in_features=shppoint, out_feature_class=dissolve, dissolve_field=["LONGITUDE", "LATITUDE"], statistics_fields=[["LONGITUDE", "FIRST"], ["LATITUDE", "FIRST"]], multi_part="MULTI_PART", unsplit_lines="DISSOLVE_LINES") arcpy.DeleteIdentical_management(in_dataset=dissolve, fields=["LONGITUDE", "LATITUDE"], xy_tolerance="1 Meters", z_tolerance=0)
# Append boundary to template fc created above arcpy.Append_management(area_boundary, tnc_polygon_bnd, "NO_TEST") # Save copy of the boundary fc arcpy.CopyFeatures_management(tnc_polygon_bnd, path + "\\TNC_MappedAreas") # Clip boundaries to to extent of Nevada nv = r"K:\GIS3\States\NV\Nevada_83.shp" tnc_polygon_bnd = path + "\\tnc_project_area_polygons" arcpy.Clip_analysis(tnc_polygon_bnd, nv, path + "\\TNC_MappedAreas_NV") # Calculate acres of each mapped area tnc_areas = path + "\\TNC_MappedAreas_NV" arcpy.AddField_management(tnc_areas, "Acres", "DOUBLE") arcpy.CalculateGeometryAttributes_management(tnc_areas, [["Acres", "AREA"]], "", "ACRES", env.outputCoordinateSystem) # TNC Mapped Areas polygon will be used to mask DRI and Landfire phreatophyte data # If TNC did not map GDEs in their study areas, then there are not GDEs there #------------------------------------------------------------------------------- # Erase portion of Wassuk Range overlapped by Mt Grant # Isolate Mt Grant boundary grant = arcpy.Copy_management(tnc_areas, "mtgrant_bnd") with arcpy.da.UpdateCursor(grant, ['FILENAME']) as cursor: for row in cursor: if row[0] != "MtGrant_MaskSYSxCLA052918.tif": cursor.deleteRow() del cursor
("CalcDensity(!YEAR2018!,!DEVACRES!)"), "PYTHON3", codeblock) arcpy.AddField_management("JobsTAZ", "JobDens2018", "DOUBLE") arcpy.CalculateField_management("JobsTAZ", "JobDens2018", ("CalcDensity(!YEAR2018!,!DEVACRES!)"), "PYTHON3", codeblock) # Copy roads fc for calculations fields arcpy.CopyFeatures_management("Roads", "RoadsCopy") # Calc block length print('Calculating block length...') arcpy.AddField_management("RoadsCopy", "BlockLength", "DOUBLE") arcpy.CalculateGeometryAttributes_management("RoadsCopy", [["BlockLength", "LENGTH"]], "FEET_US", '') # Calc Bike and Ped Facility Ratings print('Calculating bike and ped facility ratings...') arcpy.AddField_management("RoadsCopy", "BikeLRating", "LONG") arcpy.AddField_management("RoadsCopy", "BikeRRating", "LONG") arcpy.AddField_management("RoadsCopy", "PedLRating", "LONG") arcpy.AddField_management("RoadsCopy", "PedRRating", "LONG") arcpy.AddField_management("RoadsCopy", "MeanBikeRating", "DOUBLE") arcpy.AddField_management("RoadsCopy", "MeanPedRating", "DOUBLE") bikefc = "RoadsCopy" fieldsL = ["BIKE_L", "BikeLRating"]
import datetime import time ###### Get date for tagging our output files ###### dateTag = datetime.datetime.today().strftime( '%Y%m%d') # looks somethin like this 20181213 ###### Get the folder path for all our stuff ###### MotherShipFolder = r"C:/Users/jtouzel/Downloads/DomainFieldDataDownload" calcCS = r"C:/Users/jtouzel/AppData/Roaming/Esri/Desktop10.6/ArcMap/Coordinate Systems/WGS 1984.prj" folderlist = [] # Create empty list for all the sub-folder paths for i in os.listdir(MotherShipFolder): # get a list of all the sub-folders folderlist.append(MotherShipFolder + "/" + i) for i in folderlist: # get into each sub-folder for n in os.listdir(i): # get the geodatabase out of the sub-folder if n.endswith('.gdb'): env.workspace = os.path.join(i, n) # set the gdb as the workspace for fc in arcpy.ListFeatureClasses(): # find the point layer desc = arcpy.Describe( fc) # first let's check if there's lat and long fields flds = desc.fields for fld in flds: # if they are there we need to delete them if fld == "LAT": arcpy.Delete_management(fc, ["LAT", "LONG"]) # add the fields back in and calculate them arcpy.AddField_management(fc, "LAT", "DOUBLE") arcpy.AddField_management(fc, "LONG", "DOUBLE") arcpy.env.outputCoordinateSystem = calcCS # set the output coordinate system so we can actually get lat long coords arcpy.CalculateGeometryAttributes_management( fc, [["LAT", "POINT_Y"], ["LONG", "POINT_X"]])
all_fields = arcpy.ListFields(zcta_area_file) pari = False ar = False for field in all_fields: print(field.name) if field.name == "PariLenMI": arcpy.DeleteField_management(zcta_area_file, "PariLenMI") if field.name == "AreaMI": arcpy.DeleteField_management(zcta_area_file, "AreaMI") if pari and ar: break arcpy.AddField_management(zcta_area_file, "PariLenMI", "float") arcpy.AddField_management(zcta_area_file, "AreaMI", "float") arcpy.CalculateGeometryAttributes_management( zcta_area_file, [["PariLenMI", "PERIMETER_LENGTH"]], "MILES_US") arcpy.CalculateGeometryAttributes_management(zcta_area_file, [["AreaMI", "AREA"]], area_unit="SQUARE_MILES_US") sc = arcpy.SearchCursor(zcta_area_file) row = sc.next() while row: zcta = row.getValue(zcta_field_name) pari = row.getValue("PariLenMI") area = row.getValue("AreaMI") time = a_t + b_peri_t * pari + b_peri_t * math.sqrt(area) distance = a_d + b_peri_d * pari + b_peri_d * math.sqrt(area) outdata.write("{0},{1},{2},{3},0\n".format(zcta, zcta, time, distance)) row = sc.next()
def main(maxlf_dir: str = str(), min_lf: float = float(), prj_name: str = str(), unit: str = str(), version: str = str()):
    """ delineate optimum plantings

    required input variables:
    maxlf_dir = directory containing the MaxLifespan Rasters
    min_lf = minimum plant lifespan where plantings are considered
    prj_name = "TBR" # (corresponding to folder name) prj_name = "BartonsBar" (for example)
    unit = "us" or "si"
    version = "v10" # type() = 3-char str: vII

    Returns ras_dir (the project raster directory path) on success, or -1 when
    raster lookup or shapefile-to-raster conversion fails.
    """
    logger = logging.getLogger("logfile")
    logger.info("PLACE OPTIMUM PLANT SPECIES ----- ----- ----- -----")
    features = cDef.FeatureDefinitions(False)  # read feature IDs (required to identify plants)
    # Unit handling: US areas are computed in square feet and later converted
    # to acres; SI areas stay in square meters (conversion factor 1.0).
    if unit == "us":
        area_units = "SQUARE_FEET_US"
        ft2_to_acres = config.ft2ac
    else:
        area_units = "SQUARE_METERS"
        ft2_to_acres = 1.0
    arcpy.CheckOutExtension('Spatial')  # Spatial Analyst license required for Con/IsNull raster algebra below
    arcpy.gp.overwriteOutput = True
    path2pp = config.dir2pm + prj_name + "_" + version + "\\"
    # folder settings
    ras_dir = path2pp + "Geodata\\Rasters\\"
    shp_dir = path2pp + "Geodata\\Shapefiles\\"
    quant_dir = path2pp + "Quantities\\"
    fGl.del_ovr_files(path2pp)  # Delete temporary raster calculator files
    # file settings
    xlsx_target = path2pp + prj_name + "_assessment_" + version + ".xlsx"
    action_ras = {}
    # LOOK UP MAXLIFESPAN RASTERS: collect one raster per plant feature ID and
    # identify the overall max-lifespan plant raster ("max"+"plant" in name).
    try:
        logger.info("Looking up MaxLifespan Rasters ...")
        arcpy.env.workspace = maxlf_dir
        action_ras_all = arcpy.ListRasters()
        logger.info(" >> Source directory: " + maxlf_dir)
        arcpy.env.workspace = path2pp + "Geodata\\"
        for aras in action_ras_all:
            for plant in features.id_list_plants:
                if plant in str(aras):
                    logger.info(" -- found: " + maxlf_dir + str(aras))
                    action_ras.update({aras: arcpy.Raster(maxlf_dir + aras)})
            # NOTE(review): max_lf_plants is only bound if a raster named
            # like "max...plant..." exists; otherwise the crop step below
            # raises NameError (caught by the broad except there) — confirm
            # such a raster is guaranteed by the upstream MaxLifespan module.
            if ("max" in str(aras)) and ("plant" in str(aras)):
                max_lf_plants = arcpy.Raster(maxlf_dir + aras)
        logger.info(" -- OK (read Rasters)\n")
    except:
        logger.info("ERROR: Could not find action Rasters.")
        return -1
    # CONVERT PROJECT SHAPEFILE TO RASTER
    try:
        logger.info("Converting Project Shapefile to Raster ...")
        arcpy.env.workspace = shp_dir
        arcpy.PolygonToRaster_conversion("ProjectArea.shp", "AreaCode", ras_dir + "ProjectArea.tif",
                                         cell_assignment="CELL_CENTER", priority_field="NONE", cellsize=1)
        logger.info(" -- OK. Loading project raster ...")
        arcpy.env.workspace = path2pp + "Geodata\\"
        prj_area = arcpy.Raster(ras_dir + "ProjectArea.tif")
        logger.info(" -- OK (Shapefile2Raster conversion)\n")
    except arcpy.ExecuteError:
        logger.info("ExecuteERROR: (arcpy).")
        logger.info(arcpy.GetMessages(2))
        arcpy.AddError(arcpy.GetMessages(2))
        return -1
    except Exception as e:
        logger.info("ExceptionERROR: (arcpy).")
        logger.info(e.args[0])
        arcpy.AddError(e.args[0])
        return -1
    except:
        logger.info("ExceptionERROR: (arcpy) Conversion failed.")
        return -1
    # CONVERT EXISTING PLANTS SHAPEFILE TO RASTER
    # Unlike the project-area conversion above, failure here does NOT abort:
    # an arcpy error triggers creation of an empty placeholder raster instead.
    try:
        logger.info("Converting PlantExisting.shp Shapefile to Raster ...")
        arcpy.env.workspace = shp_dir
        arcpy.PolygonToRaster_conversion(shp_dir + "PlantExisting.shp", "gridcode", ras_dir + "PlantExisting.tif",
                                         cell_assignment="CELL_CENTER", priority_field="NONE", cellsize=1)
        arcpy.env.workspace = path2pp + "Geodata\\"
        logger.info(" -- OK (Shapefile2Raster conversion)\n")
    except arcpy.ExecuteError:
        logger.info("ExecuteERROR: (arcpy).")
        logger.info(arcpy.GetMessages(2))
        arcpy.AddError(arcpy.GetMessages(2))
        # fall back to an empty 8-bit raster dataset as "no existing plants"
        arcpy.CreateRasterDataset_management(ras_dir, "PlantExisting.tif", "1", "8_BIT_UNSIGNED",
                                             "World_Mercator.prj", "3", "", "PYRAMIDS -1 NEAREST JPEG",
                                             "128 128", "NONE", "")
    except Exception as e:
        logger.info("ExceptionERROR: (arcpy).")
        logger.info(e.args[0])
        arcpy.AddError(e.args[0])
    except:
        logger.info("WARNING: PlantExisting.shp is corrupted or non-existent.")
    logger.info(" >> Loading existing plant raster ...")
    existing_plants = arcpy.Raster(ras_dir + "PlantExisting.tif")
    # RETAIN RELEVANT PLANTINGS ONLY
    shp_4_stats = {}  # maps raster name -> shapefile used for area statistics
    try:
        logger.info("Analyzing optimum plant types in project area ...")
        logger.info(" >> Cropping maximum lifespan Raster ... ")
        arcpy.env.extent = prj_area.extent
        # keep max-lifespan values only inside the project area and where no
        # plants already exist
        max_lf_crop = Con((~IsNull(prj_area) & ~IsNull(max_lf_plants)),
                          Con(IsNull(existing_plants), Float(max_lf_plants)))
        logger.info(" >> Saving crop ... ")
        max_lf_crop.save(ras_dir + "max_lf_pl_c.tif")
        logger.info(" -- OK ")
        # occupied_px_ras accumulates pixels already claimed by an earlier
        # plant raster so later rasters only fill the remaining gaps.
        occupied_px_ras = ""
        for aras in action_ras.keys():
            plant_ras = action_ras[aras]
            # normalize raster name with and without the ".tif" extension
            if not('.tif' in str(aras)):
                aras_tif = str(aras) + '.tif'
                aras_no_end = aras
            else:
                aras_tif = aras
                aras_no_end = aras.split('.tif')[0]
            logger.info(" >> Applying MaxLifespan Raster({}) where lifespan > {} years.".format(str(plant_ras), str(min_lf)))
            # keep this plant only inside the project area and where the max
            # lifespan meets the minimum threshold
            __temp_ras__ = Con((~IsNull(prj_area) & ~IsNull(plant_ras)),
                               Con((Float(max_lf_plants) >= min_lf), (max_lf_plants * plant_ras)))
            if arcpy.Exists(occupied_px_ras):
                logger.info(" >> Reducing to relevant pixels only ... ")
                __temp_ras__ = Con((IsNull(occupied_px_ras) & IsNull(existing_plants)), __temp_ras__)
                occupied_px_ras = Con(~IsNull(occupied_px_ras), occupied_px_ras, __temp_ras__)
            else:
                # first plant raster: it seeds the occupied-pixel accumulator
                occupied_px_ras = __temp_ras__
                __temp_ras__ = Con(IsNull(existing_plants), __temp_ras__)
            logger.info(" >> Saving raster ... ")
            __temp_ras__.save(ras_dir + aras_tif)
            logger.info(" >> Converting to shapefile (polygon for area statistics) ... ")
            try:
                shp_ras = Con(~IsNull(__temp_ras__), 1, 0)  # binarize: 1 = planted pixel
                arcpy.RasterToPolygon_conversion(shp_ras, shp_dir + aras_no_end + ".shp", "NO_SIMPLIFY")
            except:
                logger.info(" !! " + aras_tif + " is not suitable for this project.")
                arcpy.env.workspace = maxlf_dir
            logger.info(" >> Calculating area statistics ... ")
            try:
                arcpy.AddField_management(shp_dir + aras_no_end + ".shp", "F_AREA", "FLOAT", 9)
            except:
                logger.info(" * field F_AREA already exists ")
            try:
                arcpy.CalculateGeometryAttributes_management(shp_dir + aras_no_end + ".shp",
                                                             geometry_property=[["F_AREA", "AREA"]],
                                                             area_unit=area_units)
                shp_4_stats.update({aras: shp_dir + aras_no_end + ".shp"})
            except:
                # area calc failed: point statistics at an empty dummy shapefile
                shp_4_stats.update({aras: config.dir2pm + ".templates\\area_dummy.shp"})
                logger.info(" !! Omitting (not applicable) ...")
        arcpy.env.workspace = path2pp + "Geodata\\"
        logger.info(" -- OK (Shapefile and raster analyses)\n")
        logger.info("Calculating area statistics of plants to be cleared for construction ...")
        try:
            arcpy.AddField_management(shp_dir + "PlantClearing.shp", "F_AREA", "FLOAT", 9)
        except:
            logger.info(" * cannot add field F_AREA to %s (already exists?)" % str(shp_dir + "PlantClearing.shp"))
        try:
            arcpy.CalculateGeometryAttributes_management(shp_dir + "PlantClearing.shp",
                                                         geometry_property=[["F_AREA", "AREA"]],
                                                         area_unit=area_units)
            shp_4_stats.update({"clearing": shp_dir + "PlantClearing.shp"})
        except:
            shp_4_stats.update({"clearing": config.dir2pm + ".templates\\area_dummy.shp"})
            logger.info(" * no clearing applicable ")
        logger.info(" -- OK (Statistic calculation)\n")
    except arcpy.ExecuteError:
        logger.info("ExecuteERROR: (arcpy).")
        logger.info(arcpy.GetMessages(2))
        arcpy.AddError(arcpy.GetMessages(2))
        return -1
    except Exception as e:
        logger.info("ExceptionERROR: (arcpy).")
        logger.info(e.args[0])
        arcpy.AddError(e.args[0])
        return -1
    except:
        logger.info("ExceptionERROR: (arcpy) Conversion failed.")
        return -1
    # CLEAN UP useless shapefiles
    logger.info("Cleaning up redundant shapefiles ...")
    arcpy.env.workspace = shp_dir
    all_shps = arcpy.ListFeatureClasses()
    for shp in all_shps:
        if "_del" in str(shp):
            try:
                arcpy.Delete_management(shp)
            except:
                logger.info(str(shp) + " is locked. Remove manually to avoid confusion.")
    arcpy.env.workspace = path2pp + "Geodata\\"
    logger.info(" -- OK (Clean up)\n")
    # EXPORT STATISTIC TABLES
    logger.info("Exporting table statistics ...")
    stat_files = {}  # maps raster/"clearing" key -> exported txt table path
    for ts in shp_4_stats.keys():
        try:
            logger.info(" >> Exporting " + str(shp_4_stats[ts]) + " area ...")
            arcpy.TableToTable_conversion(shp_4_stats[ts], quant_dir, "plant_" + ts + ".txt")
            stat_files.update({ts: quant_dir + "plant_" + ts + ".txt"})
        except:
            logger.info(" !! EXPORT FAILED (empty %s ?)" % str(ts))
    logger.info(" -- OK (Table export)\n")
    arcpy.CheckInExtension('Spatial')
    # PREPARE AREA DATA (QUANTITIES)
    logger.info("Processing table statistics ...")
    write_dict = {}
    for sf in stat_files.keys():
        # NOTE(review): assumes fGl.read_txt yields rows where row[0] is the
        # polygon gridcode (1 = planted) and row[1] the F_AREA value — confirm
        # against fGl's implementation.
        stat_data = fGl.read_txt(stat_files[sf])
        logger.info(" --> Extracting relevant area ...")
        polygon_count = 0
        total_area_ft2 = 0.0
        for row in stat_data:
            if row[0] == 1:
                total_area_ft2 += row[1]
                polygon_count += 1
        # convert to acres for US units; SI values pass through unchanged
        write_dict.update({sf: total_area_ft2 * float(ft2_to_acres)})
        logger.info(" --> OK")
    logger.info(" -- OK (Area extraction finished).")
    # WRITE AREA DATA TO EXCEL FILE
    logger.info("Writing results ...")
    fGl.write_dict2xlsx(write_dict, xlsx_target, "B", "C", 4)
    logger.info(" -- OK (PLANT PLACEMENT FINISHED)\n")
    return ras_dir