def addNotation(notationType, fieldsToAdd, joinFieldName, outputTable,
                scratchTable, inputXField, inputYField,
                inputCoordinateFormat, inputSpatialReference):
    """Convert the coordinates in outputTable to notationType and append the
    converted fields.

    Runs ConvertCoordinateNotation into scratchTable, then joins the
    requested fields back onto outputTable via joinFieldName.

    Returns:
        True on success, False if a geoprocessing error occurred
        (the original version fell through and returned None on error).
    """
    try:
        arcpy.AddMessage(
            "Converting & appending {0} with fields {1} ...".format(
                notationType, fieldsToAdd))
        arcpy.ConvertCoordinateNotation_management(
            outputTable, scratchTable, inputXField, inputYField,
            inputCoordinateFormat, notationType, joinFieldName,
            inputSpatialReference)
        arcpy.JoinField_management(outputTable, joinFieldName, scratchTable,
                                   joinFieldName, fieldsToAdd)
        # TRICKY: DDLat/DDLon names are hard-coded in ConvertCoordinateNotation,
        # so rename them to keep both DD and DD_NUMERIC in the same output table.
        if notationType == 'DD_NUMERIC':
            arcpy.AlterField_management(outputTable, 'DDLat', 'DDLatNumeric',
                                        'DDLatNumeric')
            arcpy.AlterField_management(outputTable, 'DDLon', 'DDLonNumeric',
                                        'DDLonNumeric')
        return True
    except arcpy.ExecuteError:
        # Surface the tool error messages and signal failure to the caller.
        msgs = arcpy.GetMessages()
        arcpy.AddError(msgs)
        print(msgs)
        return False
def rename_field(occupation_name):
    """Rename the spatial-join count field on the global ``out_features`` to
    *occupation_name*.

    Tries 'Join_Count_1' first (the name produced after repeated joins) and
    falls back to 'Join_Count' when that field is absent.
    """
    try:
        arcpy.AlterField_management(out_features, 'Join_Count_1',
                                    occupation_name, occupation_name)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit still
        # propagate; an arcpy failure here means the "_1" field doesn't exist.
        arcpy.AlterField_management(out_features, 'Join_Count',
                                    occupation_name, occupation_name)
def addDlaField(table, targetName, field, attrs, type, length):
    # add a field to a dla Geodatabase
    #
    # Ensures a field named targetName exists on `table`:
    #   - if it already appears in `attrs` (exact match), do nothing;
    #   - if a case-insensitive match exists, rename it in two steps
    #     (name -> name_1 -> targetName) since AlterField cannot change case;
    #   - otherwise add a brand-new field via addField().
    # Returns the last arcpy result (truthy) on success, False if targetName
    # was not present and nothing was altered/added.
    # NOTE(review): `type` shadows the builtin; kept for interface stability.
    retcode = False
    try:
        attrs.index(targetName)  # check if field exists, compare uppercase
        retcode = True
    except:
        try:
            upfield = False
            tupper = targetName.upper()
            for nm in attrs:
                nupper = nm.upper()
                # if case insensitive match, note GlobalID cannot be renamed
                if tupper == nupper and nupper != 'GLOBALID':
                    nm2 = nm + "_1"
                    # two-step rename works around AlterField refusing a
                    # case-only change
                    retcode = arcpy.AlterField_management(table, nm, nm2)
                    retcode = arcpy.AlterField_management(
                        table, nm2, targetName)
                    addMessageLocal("Field altered: " + nm + " to " + targetName)
                    upfield = True
            if upfield == False:
                retcode = addField(table, targetName, type, length)
                addMessageLocal("Field added: " + targetName)
        except:
            showTraceback()
            # drop any field prefix from the source layer (happens with map joins)
            for attr in attrs:
                thisFieldName = attr[attr.rfind(".") + 1:]
                if thisFieldName.upper() == targetName.upper():
                    addMessageLocal(
                        "WARNING: Existing field name '" + thisFieldName +
                        "' conflicts with new field name '" + targetName +
                        "'. Identical names with different case are not supported by databases!\n"
                    )
    return retcode
def add_domain():
    # Attach attribute domains (thresholds) read from the global xlrd sheet
    # `table` (`nrows` rows): a 1-value row names the current table, a row
    # with >2 values names a domain and its target field.
    domain_flag = ''
    tb_flag = ""
    tbfld_flag = ""
    for k in range(nrows):
        if table.cell(k, 0).value == '':
            continue
        alst = table.row_values(k)
        if '' in alst:
            # drop blank cells (set() also removes duplicates)
            alst = list(set(alst))
            alst.remove('')
        if len(alst) == 1:
            # row holds only a table name
            tb_flag = table.cell(k, 0).value
        elif len(alst) > 2:
            # row holds a domain name (col 0) and the field it applies to (col 3)
            domain_flag = table.cell(k, 0).value
            tbfld_flag = table.cell(k, 3).value
            # NOTE(review): placement of this try inside the per-row branch is
            # assumed from the mangled source — confirm against the original.
            try:
                if tbfld_flag == "ENGINEETYPE":
                    arcpy.AlterField_management(tb_flag, tbfld_flag,
                                                new_field_alias="工程类型",
                                                field_type="SHORT")
                elif tbfld_flag == "SYSDATAFROM":
                    arcpy.AlterField_management(tb_flag, tbfld_flag,
                                                field_type="SHORT")
                arcpy.AssignDomainToField_management(tb_flag, tbfld_flag,
                                                     domain_flag)
            except RuntimeError:
                # BUG FIX: Python 2 print statement -> print() function
                print(arcpy.GetMessages())
def execute(self, params, messages):
    """Tabulate species/planning-unit overlap and append it to puvspr.dat.

    params: [0] Marxan database folder, [1] species layer,
            [2] species id field name.
    """
    MarxanDB = params[0].valueAsText
    species_lyr = params[1].valueAsText
    elsubid1 = params[2].valueAsText
    pu_layer = os.path.join(MarxanDB, "pulayer", "pulayer.shp")
    arcpy.env.workspace = "in_memory"
    arcpy.AddMessage("tabulating area")
    pu_fc = arcpy.FeatureClassToFeatureClass_conversion(
        pu_layer, env.workspace, "pu_fc")
    tab_area = arcpy.TabulateIntersection_analysis(pu_fc, "id", species_lyr,
                                                   "tab_area", elsubid1)
    # Rename fields to the Marxan puvspr.dat schema
    arcpy.AlterField_management(tab_area, "id", "pu")
    arcpy.AlterField_management(tab_area, elsubid1, "species")
    arcpy.AlterField_management(tab_area, "AREA", "amount")
    puvspr_dat = os.path.join(MarxanDB, "input", "puvspr.dat")
    fields = ["species", "pu", "amount"]
    with open(puvspr_dat, "a+") as f:
        f.write('\t'.join(fields) + '\n')
        with arcpy.da.SearchCursor(tab_area, fields) as cursor:
            for row in cursor:
                f.write('\t'.join([str(r) for r in row]) + '\n')
    # (removed redundant f.close(): the `with` block already closes the file)
    return
def execute(self, params, messages):
    """Compute planning-unit boundary lengths and append them to bound.dat.

    params: [0] Marxan database folder.
    """
    MarxanDB = params[0].valueAsText
    arcpy.env.workspace = "in_memory"
    pulayer = os.path.join(MarxanDB, "pulayer", "pulayer.shp")
    poly_neighbors = arcpy.PolygonNeighbors_analysis(
        pulayer, "poly_neighbors", "id")
    # Rename fields to the Marxan bound.dat schema
    arcpy.AlterField_management(poly_neighbors, "src_id", "id1")
    arcpy.AlterField_management(poly_neighbors, "nbr_id", "id2")
    arcpy.AlterField_management(poly_neighbors, "LENGTH", "boundary")
    bound_dat = os.path.join(MarxanDB, "input", "bound.dat")
    fields = ["id1", "id2", "boundary"]
    with open(bound_dat, "a+") as f:
        f.write('\t'.join(fields) + '\n')
        with arcpy.da.SearchCursor(poly_neighbors, fields) as cursor:
            for row in cursor:
                f.write('\t'.join([str(r) for r in row]) + '\n')
    # (removed redundant f.close(): the `with` block already closes the file)
    return
def compute_CII_per_island():
    """Attach the mean overall CII score to each island polygon.

    Zonal statistics of the CII raster are computed per buffered island,
    joined back onto the islands, and islands without a score are dropped.
    """
    arcpy.CheckOutExtension("Spatial")
    # Mean CII raster value per island (zones keyed by STRONG)
    arcpy.sa.ZonalStatisticsAsTable("buffered_islands", "STRONG",
                                    "cii_overall_score_ras1",
                                    "islands_with_CII_scores_table",
                                    "DATA", "MEAN")
    # Give the statistic a meaningful name
    arcpy.AlterField_management("islands_with_CII_scores_table", "MEAN",
                                "CII_Score_Overall")
    # Join the score table onto the islands and persist the joined copy
    arcpy.AddJoin_management("islands", "STRONG",
                             "islands_with_CII_scores_table", "STRONG",
                             "KEEP_ALL")
    arcpy.CopyFeatures_management("islands", "islands_with_score_with_nulls")
    arcpy.RemoveJoin_management("islands")
    # Keep only islands that received a score; "> 0" filters the nulls
    # (CopyFeatures_management alone did not drop them)
    arcpy.Select_analysis(
        "islands_with_score_with_nulls", "islands_with_score",
        'islands_with_CII_scores_table_CII_Score_Overall > 0')
    # Drop join bookkeeping columns
    unneeded = ["islands_with_CII_scores_table_OBJECTID",
                "islands_with_CII_scores_table_STRONG",
                "islands_with_CII_scores_table_COUNT",
                "islands_with_CII_scores_table_AREA"]
    arcpy.DeleteField_management("islands_with_score", unneeded)
    # Restore readable names: rename long joined fields back to their alias
    for fld in arcpy.ListFields("islands_with_score"):
        print(fld.name)
        if fld.aliasName in ["STRONG", "Orig_Length", "CII_Score_Overall"]:
            arcpy.AlterField_management("islands_with_score", fld.name,
                                        fld.aliasName)
    # Clean up
    remove_intermediary_layers(["buffered_islands",
                                "islands_with_CII_scores_table",
                                "islands_with_score_with_nulls"])
    turn_off_layers(["islands_with_score"])
def find_trail_island_intersections():
    """Spatially join islands onto trails and summarize island stats per trail."""
    # Field mapping used by the spatial join, seeded from both inputs
    fmap = arcpy.FieldMappings()
    fmap.addTable("trails")
    fmap.addTable("islands_with_score")
    # Merge rules: total island length, island count, mean CII score
    set_up_merge_rules("Orig_length", "Sum", fmap)
    set_up_merge_rules("STRONG", "Count", fmap)
    set_up_merge_rules("CII_Score_Overall", "Mean", fmap)
    # One output row per trail; islands within 50 m count as intersecting
    arcpy.SpatialJoin_analysis("trails", "islands_with_score",
                               "trails_intersecting", "JOIN_ONE_TO_ONE",
                               "KEEP_ALL", fmap, "INTERSECT",
                               search_radius="50 Meters")
    # Friendlier names on the joined output
    arcpy.AlterField_management("trails_intersecting", "Orig_length",
                                "Length_of_All_Islands",
                                "Length_of_All_Islands")
    arcpy.AlterField_management("trails_intersecting", "STRONG",
                                "Num_of_Islands", "Num_of_Islands")
    arcpy.AlterField_management("trails_intersecting", "CII_Score_Overall",
                                "Trail_CII_Score", "Trail_CII_Score")
    # Drop join bookkeeping
    arcpy.DeleteField_management("trails_intersecting", ["TARGET_FID"])
    # Clean up
    remove_intermediary_layers([])
    turn_off_layers(["trails_intersecting"])
def calculate_fields(feature_class_path):
    """Derive NOBSMT from the Y/N Basement flag, standardize survey field
    names, and drop the original Basement column."""
    # Code block evaluated per-row: Y -> 0, N -> 1, anything else -> -1
    bsmt_expression = "def basement(bool):\n if bool.upper() == 'Y':\n return 0\n elif bool.upper() == 'N':\n return 1\n else:\n return -1"
    arcpy.CalculateField_management(feature_class_path, "NOBSMT",
                                    "basement( !Basement!)", "PYTHON_9.3",
                                    bsmt_expression)
    # Rename fields to the survey schema
    arcpy.AlterField_management(feature_class_path, "Address", "SITEADDR")
    arcpy.AlterField_management(feature_class_path, "Elevation", "SURVEYFFE")
    # The raw flag is no longer needed once NOBSMT is populated
    arcpy.DeleteField_management(feature_class_path, "Basement")
def check_bound_shp(boundary_shp, f_name):
    """Copy the boundary shapefile to scratch, number its zones sequentially,
    and guarantee an 'Area_Name' field (from *f_name*, or default 'AOI')."""
    existing_fields = [f.name for f in arcpy.ListFields(boundary_shp)]
    if "Zone_no" in existing_fields:
        arcpy.DeleteField_management(boundary_shp, "Zone_no")
    zone_info = os.path.join(scratch, "tmp_shp_copy")
    if arcpy.Exists(zone_info):
        arcpy.Delete_management(zone_info)
    arcpy.CopyFeatures_management(boundary_shp, zone_info)
    # Sequential (1-based) zone numbers for the reaches
    arcpy.AddField_management(zone_info, "Zone_no", "LONG")
    with arcpy.da.UpdateCursor(zone_info, ["Zone_no"]) as cursor:
        seq = 0
        for row in cursor:
            seq += 1
            row[0] = seq
            cursor.updateRow(row)
    count = int(arcpy.GetCount_management(zone_info).getOutput(0))
    print("number of features in boundary shaprefile is {0}".format(count))
    if count == 0:
        sys.exit("Error - boundary shp file provided contains no data!")
    elif count == 1:
        if f_name is None or f_name == "":
            # No name field supplied: create one and fill with a default
            if "Area_Name" in existing_fields:
                arcpy.DeleteField_management(zone_info, "Area_Name")
            arcpy.AddField_management(zone_info, 'Area_Name', 'TEXT')
            with arcpy.da.UpdateCursor(zone_info, ['Area_Name']) as cursor:
                for row in cursor:
                    row[0] = "AOI"
                    cursor.updateRow(row)
        if f_name in existing_fields:
            arcpy.AlterField_management(zone_info, f_name,
                                        new_field_name='Area_Name')
        print("single AOI provided - outputs")
    else:
        if f_name is None or f_name == "":
            sys.exit("######## Error - Aborting script ############ \n"
                     "Multiple shapes provided without unique names \n"
                     "### Add new field and create unique names ###")
        if f_name in existing_fields:
            arcpy.AlterField_management(zone_info, f_name,
                                        new_field_name='Area_Name')
    return zone_info
def validarUsuario(ubigeo, usuario, carpeta):
    """Validate that *usuario* is assigned to *ubigeo*; if so, build the
    user's file GDB, standardize key field names and bind the tool's output
    parameters and layer symbology. Raises arcpy.ExecuteError otherwise.

    NOTE(review): the SQL below is built by string-formatting user input —
    SQL-injection risk. Switch to a parameterized query; the placeholder
    style depends on the driver behind cursorDB().
    """
    cursor = cursorDB()
    cursor.execute(
        "SELECT SEGMENTISTA FROM TB_MODULO_ASIGN_R WHERE UBIGEO = '{}' AND SEGMENTISTA = '{}'"
        .format(ubigeo, usuario))
    # Exactly one matching row means the user is assigned to this ubigeo
    validacion = len([x[0] for x in cursor])
    if validacion == 1:
        rutaFD = crearFileGDB(ubigeo, carpeta)
        importarFeatureClass(ubigeo, rutaFD)
        arcpy.env.scratchWorkspace = rutaFD
        # Standardize the settlement key field on both CCPP layers
        arcpy.AlterField_management(r'{}\CCPP_{}'.format(rutaFD, ubigeo),
                                    "LLAVE_CCPP", "IDCCPP")
        arcpy.AlterField_management(r'{}\CCPP_N_{}'.format(rutaFD, ubigeo),
                                    "LLAVE_CCPP", "IDCCPP")
        # Bind tool output parameters (indices 3-10) to the generated layers
        arcpy.SetParameterAsText(3, r'{}\CCPP_{}'.format(rutaFD, ubigeo))
        arcpy.SetParameterAsText(4, r'{}\TRACK_{}'.format(rutaFD, ubigeo))
        arcpy.SetParameterAsText(5, r'{}\AER_{}'.format(rutaFD, ubigeo))
        arcpy.SetParameterAsText(6, r'{}\DIST_{}'.format(rutaFD, ubigeo))
        arcpy.SetParameterAsText(7, r'{}\CN_{}'.format(rutaFD, ubigeo))
        arcpy.SetParameterAsText(8, r'{}\HIDRO_{}'.format(rutaFD, ubigeo))
        # ****************************************************
        arcpy.SetParameterAsText(9, r'{}\CCPP_N_{}'.format(rutaFD, ubigeo))
        arcpy.SetParameterAsText(10, r'{}\RUTA_OLD_{}'.format(rutaFD, ubigeo))
        # Apply the matching symbology (global lyr* layer files) per parameter
        params = arcpy.GetParameterInfo()
        for param in params:
            if '{}'.format(param.name) == 'centrosPoblados':
                param.symbology = lyrCcpp
            elif '{}'.format(param.name) == 'track':
                param.symbology = lyrTrack
            elif '{}'.format(param.name) == 'aer':
                param.symbology = lyrAer
            elif '{}'.format(param.name) == 'distrito':
                param.symbology = lyrDist
            ##--------------------------------------------------
            elif '{}'.format(param.name) == 'curvasnivel':
                param.symbology = lyrCN
            elif '{}'.format(param.name) == 'hidrografia':
                param.symbology = lyrHidro
    else:
        # User not assigned to this ubigeo: report and abort the tool
        arcpy.AddError("\n" * 2 + "-" * 80 + "\n")
        arcpy.AddError("OBSERVACION:")
        arcpy.AddWarning(
            "EL usuario ingresado no esta relacionado a este ubigeo.")
        arcpy.AddError("\n" + "-" * 80 + "\n" * 2)
        raise arcpy.ExecuteError
    cursor.close()
def generar_reporte():  # function that builds the results report
    """Build the 'BaseReporte' table and per-cluster statistics, then join
    the cluster totals onto the report table.

    Relies on module-level globals: capaOrigenIrradiacion, campoAreaMax,
    ruta_gdb, capaFinalClusters, campoAreaPoligono, ws, capturarIdCapa, Layer.
    """
    fields = arcpy.ListFields(capaOrigenIrradiacion)
    # Build a FieldInfo object keeping only the max-area field, renamed Acum_Max
    fieldinfo = arcpy.FieldInfo()
    for field in fields:
        if field.name == campoAreaMax:
            fieldinfo.addField(field.name, "Acum_Max", "VISIBLE", "")
        else:
            fieldinfo.addField(field.name, field.name, "HIDDEN", "")
    tabla_basereporte = arcpy.MakeTableView_management(
        in_table=capaOrigenIrradiacion, out_view="basereporte",
        field_info=fieldinfo, workspace="in_memory")
    arcpy.CopyRows_management(in_rows=tabla_basereporte,
                              out_table="in_memory\\BaseReporte")
    arcpy.AddField_management(in_table="in_memory\\BaseReporte",
                              field_name="Id_cluster", field_type="LONG",
                              field_alias="Id_cluster")
    arcpy.AlterField_management(in_table="in_memory\\BaseReporte",
                                field="Acum_Max", new_field_alias="Acum_Max")
    # Populate Id_cluster from the layer's id field (helper resolves its name)
    arcpy.CalculateField_management(
        in_table="in_memory\\BaseReporte", field="Id_cluster",
        expression="float(!%s!)" % (capturarIdCapa("in_memory\\basereporte")),
        expression_type="PYTHON_9.3")
    arcpy.CopyRows_management(in_rows="in_memory\\BaseReporte",
                              out_table=ruta_gdb + "\\" + "BaseReporte")
    # Per-cluster totals: accumulated magnitude and area
    arcpy.Statistics_analysis(
        in_table=capaFinalClusters, out_table="in_memory\\tabla_cluster",
        statistics_fields="%s SUM;Shape_Area SUM" % (campoAreaPoligono),
        case_field="CLUSTER")
    # Instantiate a Layer object to access its properties
    capa = Layer(ruta_gdb + "\\" + "BaseReporte", [], ws)
    arcpy.AlterField_management(in_table="in_memory\\tabla_cluster",
                                field="SUM_%s" % (campoAreaPoligono),
                                new_field_name="Mag_Acum_Total",
                                new_field_alias="Mag_Acum_Total")
    arcpy.AlterField_management(in_table="in_memory\\tabla_cluster",
                                field="SUM_Shape_Area",
                                new_field_name="Area_Acum_Total",
                                new_field_alias="Area_Acum_Total")
    # Performs a multiple add-join cursor between the specified layers
    capa.addjoinCursorMultiple(
        "in_memory\\tabla_cluster", "Id_cluster", "CLUSTER",
        ["Mag_Acum_Total", "Area_Acum_Total"])
def joinWithCatalogAndSave(fc_workspace, featureClass, rc_workspace,
                           rasterCatalog, outWorkspace):
    """Join *featureClass* to *rasterCatalog* (Photo -> Name), save the joined
    copy to *outWorkspace*, then tidy up the resulting field names."""
    layer = 'feature_layer'
    arcpy.MakeFeatureLayer_management(
        os.path.join(fc_workspace, featureClass), layer)
    arcpy.AddJoin_management(layer, "Photo",
                             os.path.join(rc_workspace, rasterCatalog), "Name")
    outLayer = os.path.join(outWorkspace, featureClass + "withPhotos")
    arcpy.CopyFeatures_management(layer, outLayer)
    # Drop catalog bookkeeping fields carried over by the join
    for suffix in ["OBJECTID", "Name", "Shape_Length", "Shape_Area"]:
        arcpy.DeleteField_management(
            outLayer, "{}_{}".format(rasterCatalog, suffix))
    # Strip the "<featureClass>_" prefix the join added to the remaining fields
    prefixed = [f.name for f in arcpy.ListFields(outLayer)
                if featureClass in f.name]
    for fieldName in prefixed:
        if "Raster" in fieldName:
            # arcpy.AlterField_management( outLayer,fieldName,"Image" )
            continue
        arcpy.AlterField_management(outLayer, fieldName,
                                    fieldName[len(featureClass) + 1:])
def gdf_to_fc(gdf, fc):
    """
    Converts a geopandas dataframe to a layer in an ESRI file geodatabase.

    Notes:
    - gdf has to have a geometry field.
    """
    if 'geometry' not in gdf.columns.values:
        # BUG FIX: a bare sys.exit() exited with status 0 and no diagnostics;
        # exit with a message (and non-zero status) instead.
        sys.exit("gdf_to_fc: input GeoDataFrame has no 'geometry' column")
    GDB, workspace, dirname, fc_name = gdb_path(fc)
    # Write the dataframe to a shapefile in a temporary directory
    tmp_dir = tempfile.TemporaryDirectory()
    p = Path(tmp_dir.name)
    n = fc_name + '.shp'
    gdf.to_file(str(p/n))
    # First two fields are presumably FID/Shape — skip them (TODO confirm)
    fc_cols = get_fields(str(p/n))[2:]
    # Copy the shapefile into the target feature class
    fc = arcpy.CopyFeatures_management(str(p/n), fc)
    gdf_cols = gdf.columns.tolist()
    gdf_cols.remove('geometry')
    # Restore the original column names (shapefiles truncate to 10 chars)
    if gdf_cols:
        col_dict = {col: gdf_cols[indx] for indx, col in enumerate(fc_cols)}
        for col in col_dict:
            if col_dict[col] != col:
                arcpy.AlterField_management(fc, col, col_dict[col],
                                            clear_field_alias="true")
    # Delete temporary directory
    tmp_dir.cleanup()
    return fc
def create_new_feature_class(in_fc, out_fc, flds=None, where=None,
                             shp_prefix=None):
    """
    Shortcut for deriving a feature class with a different field set and an
    optional definition query.

    TODO: add something to wrap up doing joins?

    Parameters:
    -----------
    in_fc: string
        Path to the input feature class.
    out_fc: string
        Path to the output feature class.
    flds: dictionary, optional, default None
        Dictionary of fields to retain; keys are the existing names and
        values are the output names.
    where: string, optional, default None
        Definition query to apply.
    """
    create_layer('__killme', in_fc, flds, where, shp_prefix)
    arcpy.CopyFeatures_management('__killme', out_fc)
    # look into this? for some reason arcpro can't get a schema lock:
    # arcpy.Delete_management('__killme')
    # At 10.3 field aliases persist, so reset each alias to the field name.
    for fld in arcpy.ListFields(out_fc):
        if fld.name != fld.aliasName and fld.type != 'Geometry':
            arcpy.AlterField_management(out_fc, fld.name,
                                        new_field_alias=fld.name)
def rename_to_standard(table):
    """Rename metric fields in *table* to standardized '<tag>_<subgroup>_pct'
    names, using the geo_metric_provenance.csv lookup filtered by the global
    ``rename_tag``. Returns *table*."""
    arcpy.AddMessage("Renaming.")
    # Locate the provenance CSV relative to this script
    this_files_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(this_files_dir)
    geo_file = os.path.abspath('../geo_metric_provenance.csv')
    with open(geo_file) as csv_file:
        reader = csv.DictReader(csv_file)
        mapping = {row['subgroup_original_code']: row['subgroup']
                   for row in reader
                   if row['main_feature'] in rename_tag and row['main_feature']}
        arcpy.AddMessage(mapping)
    # Apply the renames
    for old, new in mapping.items():
        arcpy.AddMessage(new)
        old_fname = '{}'.format(old)
        new_fname = '{}_{}_pct'.format(rename_tag, new)
        if arcpy.ListFields(table, old_fname):
            try:
                # same problem with AlterField limit of 31 characters here
                arcpy.AlterField_management(table, old_fname, new_fname,
                                            clear_field_alias=True)
            except Exception:
                # Narrowed from a bare except (KeyboardInterrupt/SystemExit
                # now propagate); fall back to the copy/delete helper.
                cu.rename_field(table, old_fname, new_fname, deleteOld=True)
    return table
def alterPlotSettingIDFieldName(outputLocation):
    """Rename "pl_setting_id" to "Setting_ID" in the FSVeg_Spatial_WT_Photos
    table of the output geodatabase."""
    gdb = os.path.join(outputLocation, 'FSVeg_Spatial_WT.gdb')
    photos_view = "FSVeg_Spatial_WT_PhotosTable"
    arcpy.MakeTableView_management(
        os.path.join(gdb, "FSVeg_Spatial_WT_Photos"), photos_view)
    arcpy.AlterField_management(photos_view, "pl_setting_id",
                                "Setting_ID", "Setting_ID")
def arcpy_call():
    """Prefix every field (except OID and Shape) with safe_prefix via
    AlterField. Reads names/safe_prefix/self from the enclosing scope."""
    for name in names:
        if name == "OID" or name == "Shape":
            continue
        # new_name = (safe_prefix + name)[:10]
        new_name = safe_prefix + name
        # BUG FIX: removed a stray trailing comma that wrapped the call's
        # result in a discarded 1-tuple.
        arcpy.AlterField_management(str(self), name, new_name)
def JoinMeanToTable(in_data, zonal_stats, zone_field, field_name):
    """
    Joins the MEAN field of the provided table to the Map_Units_Dissolve
    attribute table, deleting existing versions of the field if necessary.
    :param in_data: the Map Unit Dissolve feature class
    :param zonal_stats: the table with statistics to join
    :param zone_field: the name of the field to join to ("Map_Unit_ID")
    :param field_name: a field name to save the joined field as a string
    :return: None
    """
    # Drop stale copies of the target field and any leftover MEAN column
    for existing in arcpy.ListFields(in_data):
        if existing.name.lower() == field_name.lower():
            arcpy.DeleteField_management(in_data, existing.name)
        if existing.name == "MEAN":
            arcpy.DeleteField_management(in_data, existing.name)
    # Pull MEAN across from the zonal statistics table
    arcpy.JoinField_management(in_data, zone_field, zonal_stats,
                               zone_field, "MEAN")
    # Save the joined statistic under the requested name
    arcpy.AlterField_management(in_data, "MEAN", field_name)
def runPrefixCanrateZSat(projectGDBPath):
    """Prefix every non-OBJECTID field of the canrateZSat table with 'can_'."""
    # arcpy.AddMessage("\nRenaming cancer rate statistics table fields...")
    table = os.path.join(projectGDBPath, "canrateZSat")
    prefix = "can_"
    for field in arcpy.ListFields(table):
        if field.name != "OBJECTID":
            prefixName = prefix + field.name
            arcpy.AlterField_management(table, field.name, prefixName,
                                        prefixName)
    # BUG FIX: removed the `del prefixName` cleanup — it raised NameError when
    # no field needed renaming, and function locals are discarded on return.
    # arcpy.AddMessage("\nCancer rate statistics table fields renamed.")
def AlterField(inFeatureClass, filed, new_field_name, new_field_alias):
    """Rename a field (and its alias) on *inFeatureClass*, printing the tool
    messages on failure.

    Note: the misspelled parameter name 'filed' is kept for backward
    compatibility with keyword callers.
    """
    try:
        arcpy.AlterField_management(inFeatureClass, filed, new_field_name,
                                    new_field_alias)
        # print('AlterField successful')
    except Exception:
        # BUG FIX: Python 2 print statement -> print() function
        print(arcpy.GetMessages())
def alterField(in_table, field, new_field_name, new_field_alias):
    """Best-effort field rename; failures (e.g. the field is missing) are
    deliberately ignored."""
    try:
        arcpy.AlterField_management(in_table=in_table, field=field,
                                    new_field_name=new_field_name,
                                    new_field_alias=new_field_alias)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit still
        # propagate; the rename itself stays best-effort.
        pass
def CountPointsInGrids(self, gridName, poi):
    """Spatially join POI points onto the grid, keeping only the per-cell
    count (renamed '<poi>_num'), and overwrite the grid layer with it."""
    joined = "temp"
    arcpy.SpatialJoin_analysis(gridName, poi, joined, "JOIN_ONE_TO_ONE")
    # Give the count column a POI-specific name
    arcpy.AlterField_management(joined, "Join_Count", poi + "_num")
    # Strip every non-required field except the count
    for fld in arcpy.ListFields(joined):
        if "_num" not in fld.name and not fld.required:
            arcpy.DeleteField_management(joined, fld.name)
    arcpy.CopyFeatures_management(joined, gridName)
    arcpy.Delete_management(joined)
    print("Layer {0} has been processed".format(poi))
def _extract_fields(base, rasters):
    """
        _extract_fields
            Extracts the values of rasters to add the information of its value to the given points

        :param base: Set of points to add the information
        :param rasters: Rasters with the information
        :return: name of the feature with the information, name of the added fields
    """
    global MESSAGES
    MESSAGES.AddMessage("Assigning Raster information...")
    _verbose_print("Base: {}".format(base))
    _verbose_print("Rasters: {}".format(rasters))
    # Make a list of the rasters (multi-value parameter arrives as a
    # semicolon-separated, possibly quoted, string)
    rasters = [x.strip("'") for x in rasters.split(";")]
    scratch_files = []
    try:
        # Initialize progress bar
        regressor_names = []
        arcpy.SetProgressor("step", "Adding raster values to the points",
                            min_range=0, max_range=len(rasters), step_value=1)
        _verbose_print("Adding raster values to the points")
        for raster in rasters:
            _verbose_print("Adding information of {}".format(raster))
            extracted_name = arcpy.CreateScratchName(
                "temp", data_type="FeatureClass",
                workspace=arcpy.env.scratchWorkspace)
            # Add the information of the raster to the points
            arcpy.gp.ExtractValuesToPoints(base, raster, extracted_name,
                                           "INTERPOLATE", "VALUE_ONLY")
            _verbose_print(
                "Scratch file created (merge): {}".format(extracted_name))
            scratch_files.append(extracted_name)
            # Rename field to coincide with the raster name
            arcpy.AlterField_management(extracted_name, "RASTERVALU",
                                        arcpy.Describe(raster).baseName)
            regressor_names.append(arcpy.Describe(raster).baseName)
            # Chain: each iteration extracts on top of the previous result
            base = extracted_name
            arcpy.SetProgressorPosition()
        # Reset progress bar
        # TRICKY: the final result is removed from the cleanup list so the
        # `finally` block deletes only the intermediate extractions.
        scratch_files.remove(extracted_name)
        arcpy.SetProgressorLabel("Executing Enrich Points")
        arcpy.ResetProgressor()
    except:
        raise
    finally:
        # Delete intermediate files
        for s_file in scratch_files:
            arcpy.Delete_management(s_file)
            _verbose_print("Scratch file deleted: {}".format(s_file))
    return extracted_name, regressor_names
def TrimFields(table):
    """Rename fields so they are database-safe: trim over-long names (with an
    'F' prefix), suffix the reserved name 'end', and prefix names that start
    with an underscore."""
    # BUG FIX: Python 2 print statements -> print() function
    print("Trimming fields for: " + table)
    arcpy.AddMessage("Trimming fields for: " + table)
    fields = arcpy.ListFields(table)
    for field in fields:
        if len(field.name) > 30:
            # BUG FIX: trim one extra character so "F" + stem totals 30 chars
            # (the original produced 31-character names, still over the limit).
            trim_amount = len(field.name) - 30
            trimmed_name = "F" + field.name[:-(trim_amount + 1)]
            arcpy.AlterField_management(table, field.name, trimmed_name)
        elif field.name == "end":
            # "end" is reserved in the target database; append an underscore
            new_name = field.name + "_"
            arcpy.AlterField_management(table, field.name, new_name)
        elif field.name.startswith("_"):
            # Handles the F appending needed by database for fields that start with underscores
            new_name = "F" + field.name
            arcpy.AlterField_management(table, field.name, new_name)
    return
def alterPlotSettingIDFieldName(outputLocation):
    '''Changes "pl_setting_id" in the final FSVeg_Spatial_WT output feature
    class to "Setting_ID"
    '''
    gdbPath = os.path.join(outputLocation, 'FSVeg_Spatial_WT.gdb')
    photosView = "FSVeg_Spatial_WT_PhotosTable"
    arcpy.MakeTableView_management(
        os.path.join(gdbPath, "FSVeg_Spatial_WT_Photos"), photosView)
    arcpy.AlterField_management(photosView, "pl_setting_id",
                                "Setting_ID", "Setting_ID")
def manipulateTable(pivotTable):
    '''Makes format changes to the pivot table: drops blank refcodes, adds a
    total-records count, joins DM-status data, and flags records east/west.'''
    # delete records with null or blank reference code
    with arcpy.da.UpdateCursor(pivotTable, 'refcode') as cursor:
        for row in cursor:
            if row[0] is None or row[0] == "":
                cursor.deleteRow()
    # add field and populate with total number of records by adding all records
    arcpy.AddField_management(pivotTable, "total_records", "DOUBLE",
                              field_length=3, field_alias="Total Records")
    expression = "!dmpend! + !dmproc! + !dmready! + !dr! + !idrev!"
    arcpy.CalculateField_management(pivotTable, "total_records", expression,
                                    "PYTHON_9.3")
    # join dm status and dm status comments data from survey site to pivot table
    join = os.path.join(env.workspace, "survey_site1")
    arcpy.AlterField_management(join, "dm_stat", "survey_site_dmstat",
                                "Survey Site - DM Status")
    arcpy.JoinField_management(pivotTable, "refcode", join, "refcode",
                               ["survey_site_dmstat", "dm_stat_comm"])
    # join original data from elementRecords table to pivot table
    join = os.path.join(env.workspace, "elementRecords")
    arcpy.JoinField_management(pivotTable, "refcode", join, "refcode",
                               ["COUNTY_NAM", "created_by", "created_on",
                                "last_up_by", "last_up_on", "element_type"])
    # add new field for east or west location
    arcpy.AddField_management(pivotTable, "Location", "TEXT", "", "", 1,
                              "Location", "", "", "")
    # list of western counties
    # BUG FIX: "ARMSTORNG" -> "ARMSTRONG" so Armstrong County records are
    # classified W instead of silently falling through to E.
    West = ["ERIE", "CRAWFORD", "MERCER", "LAWRENCE", "BEAVER", "WASHINGTON",
            "GREENE", "VENANGO", "BUTLER", "ALLEGHENY", "FAYETTE",
            "WESTMORELAND", "ARMSTRONG", "INDIANA", "CLARION", "JEFFERSON",
            "FOREST", "WARREN", "MCKEAN", "ELK", "CLEARFIELD", "CAMBRIA",
            "SOMERSET", "BEDFORD", "BLAIR", "CENTRE", "CLINTON", "POTTER",
            "CAMERON", "HUNTINGDON", "FULTON", "FRANKLIN"]
    # populate location field with east or west depending if they are in list
    with arcpy.da.UpdateCursor(pivotTable, ["COUNTY_NAM", "Location"]) as cursor:
        for row in cursor:
            row[1] = "W" if row[0] in West else "E"
            cursor.updateRow(row)
def runPrefixCanrateZSat(projectGDBPath):
    """Rename every field (except OBJECTID) of the canrateZSat table in
    *projectGDBPath*, adding a 'can_' prefix.

    A shapefile alternative using FieldMappings + TableToTable was kept as a
    commented-out option in the original; this GDB AlterField approach is
    the active one.
    """
    # Scratch folder used as the geoprocessing workspace
    arcpy.env.workspace = r"C:\Users\rkpalmerjr\Documents\School\WISC\Fall_2019\GEOG_777_Capstone_in_GIS_Development\Project_1\TESTING\Test_1\Scratch"
    stats_table = os.path.join(projectGDBPath, "canrateZSat")
    name_prefix = "can_"
    for fld in arcpy.ListFields(stats_table):
        if fld.name == "OBJECTID":
            continue
        renamed = name_prefix + fld.name
        arcpy.AlterField_management(stats_table, fld.name, renamed, renamed)
def add_elevation(points, dem="", detrend_dem=""):
    """
    Adds elevation fields to the input feature class.
    Writes all outputs to the environment workspace.

    Args:
        points -- Path to a point feature class
        dem -- Path to the digital elevation model (DEM)
        detrend_dem -- Path to the detrended digital elevation model (DEM)

    Outputs:
        elevation attributes written to the input feature class
    """
    # Add elevations to the `points` feature class
    if dem:
        arcpy.AddSurfaceInformation_3d(in_feature_class=points,
                                       in_surface=dem,
                                       out_property="Z", z_factor=1.0)
        # Change `Z` field name to `DEM_Z`
        arcpy.AlterField_management(in_table=points, field="Z",
                                    new_field_name="DEM_Z")
        arcpy.AddMessage("Added DEM elevations")
    else:
        # BUG FIX: was misspelled `arcypy.AddMessage`, which raised a
        # NameError whenever no DEM was supplied.
        arcpy.AddMessage("Error: DEM not supplied")
    # Add detrended elevations to the `points` feature class
    if detrend_dem:
        arcpy.AddSurfaceInformation_3d(in_feature_class=points,
                                       in_surface=detrend_dem,
                                       out_property="Z", z_factor=1.0)
        # Change `Z` field name to `Detrend_DEM_Z`
        arcpy.AlterField_management(in_table=points, field="Z",
                                    new_field_name="Detrend_DEM_Z")
        arcpy.AddMessage("Added Detrended DEM elevations")
    else:
        arcpy.AddMessage("Warning: Detrended DEM not supplied")
def GenerateTaxiDensityFeatureClass(fgdb, taxi_feature_class):
    """
    GenerateTaxiDensityFeatureClass creates kernel density contour polygons
    from a taxi feature class.

    Args:
        fgdb (file gdb): path to local file gdb to keep the contour feature class
        taxi_feature_class (feature class): path to taxi feature class

    Returns:
        list: list of contour polygon features (polygon geometry in json)
    """
    import json  # local import; replaces unsafe eval() on geometry JSON
    arcpy.env.overwriteOutput = True  # allow overwriting output
    # Kernel density raster of taxi locations (taxi per square km)
    kd_raster = arcpy.sa.KernelDensity(
        taxi_feature_class, "NONE",
        area_unit_scale_factor="SQUARE_KILOMETERS",
        out_cell_values="DENSITIES", method="PLANAR")
    contour_feature_class = os.path.join(fgdb, "contour")
    if arcpy.Exists(contour_feature_class):
        arcpy.Delete_management(contour_feature_class)
    # Contour shells in intervals of 50000 (taxi/sqkm)
    arcpy.sa.Contour(kd_raster, contour_feature_class,
                     contour_interval=50000, base_contour=0,
                     contour_type="CONTOUR_SHELL_UP")
    # Rename ContourMin (default name) to Contour to match the feature-layer schema
    arcpy.AlterField_management(contour_feature_class, "ContourMin",
                                'Contour', 'Minimum Contour')
    contour_dataset = []
    # Read the Contour fields and the polygon geometry as a JSON string
    with arcpy.da.SearchCursor(
            contour_feature_class,
            ["Contour", "SHAPE@JSON", "ContourMax"]) as in_cursor:
        for row in in_cursor:
            # BUG FIX: parse the geometry JSON with json.loads instead of
            # eval() — same result for valid JSON, no code execution risk.
            contour_dataset.append({
                "Contour": row[0],
                "PolygonGeometry": json.loads(row[1])['rings'],
                "ContourMax": row[2]
            })
    del in_cursor  # release the cursor after use
    return contour_dataset