def data_process(path, keepFields, expression0, expression1, newName):
    # Add the normalized variable field
    fieldName1 = arcpy.ValidateFieldName("TFvariable")
    arcpy.AddField_management(path, fieldName1, "DOUBLE", "", "", 50)
    # Calculate the normalized variable
    arcpy.CalculateField_management(path, fieldName1, expression0, "PYTHON_9.3")
    # Add the z-score field
    fieldName2 = arcpy.ValidateFieldName("Z_score")
    arcpy.AddField_management(path, fieldName2, "DOUBLE", "", "", 50)
    # Calculate the z-score
    arcpy.CalculateField_management(path, fieldName2, expression1, "PYTHON_9.3")
    # Drop every field that is not in keepFields
    fields = arcpy.ListFields(path)
    dropFields = [x.name for x in fields if x.name not in keepFields]
    arcpy.DeleteField_management(path, dropFields)
    # Process TFvariable zero data, then missing values
    zero_process(path)
    null_process(path)
    # Rename the result
    arcpy.Rename_management(path, newName)
def getClassFieldName(fieldName, classVal, table):
    '''This function generates a valid fieldname based on the combination of a desired
    fieldname and a class value

    **Description:**

        The expectation for the ATtILA output table is one field per metric per class
        value, if a class is specified. A simple concatenation of metric fieldname and
        class value has the potential to be trimmed, depending on the limits of the
        output table format (e.g. 10 characters for dbf files). This function thus
        concatenates the desired fieldname with the specified class value, runs the
        result through the arcpy field validator, and, if the concatenated fieldname
        gets shortened, trims the desired fieldname by the appropriate amount so that
        the class value always forms the final characters of the fieldname string.

    **Arguments:**

        * *fieldName* - the desired metric fieldname to which the class value will be appended
        * *classVal* - the class value
        * *table* - the table to which the field will be added

    **Returns:**

        * *validFieldName* - validated fieldname
    '''
    # Ensure we have a string value for the class
    classVal = str(classVal)
    # Build a test fieldname
    testFieldName = fieldName + classVal
    # Run the test fieldname through ESRI's fieldname validator
    validFieldName = arcpy.ValidateFieldName(testFieldName, arcpy.Describe(table).path)
    # If the validator shortened the field...
    if len(testFieldName) > len(validFieldName):
        # ...figure out how much needs to be trimmed (trim is negative)
        trim = len(validFieldName) - len(testFieldName)
        # Trim this from the intended fieldname, so the class value isn't lost
        testFieldName = fieldName[:trim] + classVal
        # Revalidate against the same workspace to ensure there aren't duplicate fields
        validFieldName = arcpy.ValidateFieldName(testFieldName, arcpy.Describe(table).path)
    return validFieldName
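# A hedged usage sketch for getClassFieldName; the table path and metric name are
# hypothetical. For a dbf output (10-character field limit), "FOREST_PCT" + "41"
# would be shortened by the validator, so the metric name gets trimmed instead and
# the class value survives as the suffix of the returned fieldname.
# fldName = getClassFieldName("FOREST_PCT", 41, r"C:\temp\metrics.dbf")
# arcpy.AddField_management(r"C:\temp\metrics.dbf", fldName, "DOUBLE")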
def CalcMeans(inTable, workspace, theField, numHouses):
    arcpy.AddMessage("\tCalculating means for " + theField + " in table: " + inTable + "...")
    if workspace == '':
        workspace = "in_memory"
    if workspace == "in_memory":
        mTable = workspace + os.sep + "mean_value"
    else:
        mTable = workspace + os.sep + "mean_value.dbf"
    # Summarize, then read the mean back from the summary table
    if not numHouses == "":
        arcpy.Statistics_analysis(inTable, mTable, theField + " MEAN", "HOUSES")
        theField = "MEAN_" + theField
        theField = arcpy.ValidateFieldName(theField, workspace)
        rows = arcpy.SearchCursor(mTable, "HOUSES = " + str(numHouses))
    else:
        arcpy.Statistics_analysis(inTable, mTable, theField + " MEAN")
        theField = "MEAN_" + theField
        theField = arcpy.ValidateFieldName(theField, workspace)
        rows = arcpy.SearchCursor(mTable)
    arcpy.AddMessage("\tGetting value for " + theField)
    row = rows.next()
    theMean = row.getValue(theField)
    # Delete the row and cursor
    del row, rows
    return theMean
def round_date_time(in_fc, input_field, new_field_name, set_year=None, set_month=None,
                    set_day=None, set_hour=None, set_minute=None, set_second=None):
    """This function will take in a feature class, and use pandas/numpy to truncate a
    date time so that the passed date-time attributes are set to a target.

    Parameters
    -----------------
    in_fc - input feature class with datetime field
    input_field - input time field
    new_field_name - new field created
    set_year - year to set
    set_month - month to set
    set_day - day to set
    set_hour - hour to set
    set_minute - minute to set
    set_second - second to set
    """
    try:
        # arc_print(pd.__version__) Does not have dt lib.
        arcpy.env.overwriteOutput = True
        desc = arcpy.Describe(in_fc)
        workspace = os.path.dirname(desc.catalogPath)
        col_new_field = arcpy.ValidateFieldName(
            san.create_unique_field_name(new_field_name, in_fc), workspace)
        san.add_new_field(in_fc, col_new_field, "DATE")
        OIDFieldName = desc.OIDFieldName
        san.arc_print("Creating Pandas Dataframe from input table.")
        query = "{0} {1} {2}".format(arcpy.AddFieldDelimiters(in_fc, input_field),
                                     "is NOT", "NULL")
        fcDataFrame = san.arcgis_table_to_dataframe(in_fc, [input_field, col_new_field], query)
        JoinField = arcpy.ValidateFieldName("DFIndexJoin", workspace)
        fcDataFrame[JoinField] = fcDataFrame.index
        try:
            san.arc_print("Creating new date-time column based on field {0}.".format(
                str(input_field)), True)
            fcDataFrame[col_new_field] = fcDataFrame[input_field].apply(
                lambda dt: san.round_new_datetime(dt, set_year, set_month, set_day,
                                                  set_hour, set_minute,
                                                  set_second)).astype(datetime.datetime)
            del fcDataFrame[input_field]
        except Exception as e:
            del fcDataFrame[input_field]
            san.arc_print(
                "Could not process datetime field. "
                "Check that datetime is a year appropriate to your python version and that "
                "the time format string is appropriate.")
            san.arc_print(e.args[0])
        san.arc_print("Exporting new time field dataframe to structured numpy array.", True)
        finalStandardArray = fcDataFrame.to_records()
        san.arc_print("Joining new date-time field to feature class.", True)
        arcpy.da.ExtendTable(in_fc, OIDFieldName, finalStandardArray, JoinField,
                             append_only=False)
        san.arc_print("Deleting temporary intermediates.")
        del fcDataFrame, finalStandardArray
        san.arc_print("Script Completed Successfully.", True)
    except arcpy.ExecuteError:
        san.arc_print(arcpy.GetMessages(2))
    except Exception as e:
        san.arc_print(e.args[0])
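# A minimal usage sketch for round_date_time, assuming a feature class with a
# "CallTime" datetime field (the path and field names are hypothetical). This
# would truncate every timestamp to the top of its hour:
# round_date_time(r"C:\data\incidents.gdb\calls", "CallTime", "CallHour",
#                 set_minute=0, set_second=0)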
def add_Time_String_Field(in_fc, input_field, new_field_name, time_format):
    """This function will take in a feature class, and use pandas/numpy to format a
    date string based on the input time format."""
    try:
        # arc_print(pd.__version__) Does not have dt lib.
        arcpy.env.overwriteOutput = True
        desc = arcpy.Describe(in_fc)
        workspace = os.path.dirname(desc.catalogPath)
        col_new_field = arcpy.ValidateFieldName(
            san.create_unique_field_name(new_field_name, in_fc), workspace)
        san.add_new_field(in_fc, col_new_field, "TEXT")
        OIDFieldName = desc.OIDFieldName
        san.arc_print("Creating Pandas Dataframe from input table.")
        query = "{0} {1} {2}".format(
            arcpy.AddFieldDelimiters(in_fc, input_field), "is NOT", "NULL")
        fcDataFrame = san.arcgis_table_to_dataframe(
            in_fc, [input_field, col_new_field], query)
        JoinField = arcpy.ValidateFieldName("DFIndexJoin", workspace)
        fcDataFrame[JoinField] = fcDataFrame.index
        try:
            san.arc_print("Creating new text column based on field {0}.".format(
                str(input_field)), True)
            fcDataFrame[col_new_field] = fcDataFrame[input_field].apply(
                lambda dt: dt.strftime(time_format))
            del fcDataFrame[input_field]
        except Exception as e:
            del fcDataFrame[input_field]
            san.arc_print(
                "Could not process datetime field. "
                "Check that datetime is a year appropriate to your python version and that "
                "the time format string is appropriate.")
            san.arc_print(e.args[0])
        san.arc_print("Exporting new time field dataframe to structured numpy array.", True)
        finalStandardArray = fcDataFrame.to_records()
        san.arc_print("Joining new time string field to feature class.", True)
        arcpy.da.ExtendTable(in_fc, OIDFieldName, finalStandardArray, JoinField,
                             append_only=False)
        san.arc_print("Deleting temporary intermediates.")
        del fcDataFrame, finalStandardArray
        san.arc_print("Script Completed Successfully.", True)
    except arcpy.ExecuteError:
        san.arc_print(arcpy.GetMessages(2))
    except Exception as e:
        san.arc_print(e.args[0])
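# Usage sketch (hypothetical names): the time_format argument is a standard
# strftime pattern, so "%Y-%m-%d %H:%M" would yield text like "2020-01-31 14:05".
# add_Time_String_Field(r"C:\data\gps.gdb\tracks", "FixTime", "FixTimeTxt",
#                       "%Y-%m-%d %H:%M")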
def create_class_group_field(in_fc, input_fields, basename="GROUP_"):
    """This function will take in a feature class and build group fields from the
    unique combinations of the input field values, writing both a numeric group ID
    and the concatenated text key back to the feature class using an update cursor.

    Parameters
    -----------------
    in_fc - input feature class to add group fields to
    input_fields - semicolon-delimited fields to build a unique id from
    basename - base name for group fields."""
    try:
        arcpy.env.overwriteOutput = True
        desc = arcpy.Describe(in_fc)
        workspace = os.path.dirname(desc.catalogPath)
        input_Fields_List = input_fields.split(';')
        san.arc_print("Adding Class Fields.", True)
        valid_num_field = arcpy.ValidateFieldName("{0}_Num".format(basename), workspace)
        valid_text_field = arcpy.ValidateFieldName("{0}_Text".format(basename), workspace)
        san.add_new_field(in_fc, valid_num_field, "LONG")
        san.add_new_field(in_fc, valid_text_field, "TEXT")
        san.arc_print("Constructing class groups within dictionary.", True)
        unique_class_dict = {}
        cursor_fields = input_Fields_List + [valid_text_field, valid_num_field]
        with arcpy.da.UpdateCursor(in_fc, cursor_fields) as cursor:
            counter = 0
            group_id = 1
            for row in cursor:
                try:
                    group_field_values = row[:-2]  # Grab all but the last two fields.
                    unique_id = constructUniqueStringID(group_field_values)
                    if unique_id not in unique_class_dict:
                        unique_class_dict[unique_id] = group_id
                        group_id += 1
                    row[-1] = unique_class_dict[unique_id]
                    row[-2] = unique_id
                    cursor.updateRow(row)
                    counter += 1
                except Exception as e:
                    san.arc_print("ERROR: Skipped at iteration {0}. QAQC.".format(counter), True)
                    san.arc_print(str(e.args[0]))
        del unique_class_dict
        san.arc_print("Script Completed Successfully.", True)
    except arcpy.ExecuteError:
        san.arc_print(arcpy.GetMessages(2))
    except Exception as e:
        san.arc_print(e.args[0])
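# create_class_group_field depends on a constructUniqueStringID helper that is not
# shown above; a minimal sketch consistent with how it is called (one string key
# per combination of field values) might look like this:
def constructUniqueStringID(values, delimiter="_"):
    """Join a row's field values into a single delimited string key."""
    return delimiter.join(str(v) for v in values)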
def _tool():
    """Run when the script is called from a tool."""
    in_tbl = sys.argv[1]
    in_fld = sys.argv[2]
    out_fld = sys.argv[3]  # output field name
    # ---- main tool section ----
    desc = arcpy.da.Describe(in_tbl)
    tbl_path = desc['path']
    fnames = [i.name for i in arcpy.ListFields(in_tbl)]
    if out_fld in fnames:
        out_fld += 'dup'
    out_fld = arcpy.ValidateFieldName(out_fld, tbl_path)
    args = [in_tbl, in_fld, out_fld, tbl_path]
    msg = "in_tbl {}\nin_fld {}\nout_fld {}\ntbl_path {}".format(*args)
    tweet(msg)
    # ---- call section for processing function ----
    oid = 'OBJECTID'
    vals = [oid, in_fld]
    in_arr = arcpy.da.TableToNumPyArray(in_tbl, vals)
    tweet("{!r:}".format(in_arr))
    a0 = in_arr[in_fld]  # do stuff here
    sze = a0.dtype.str
    # ---- reassemble the table for extending ----
    dt = [('IDs', '<i8'), (out_fld, sze)]
    out_array = np.zeros((in_arr.shape[0],), dtype=dt)
    out_array['IDs'] = in_arr[oid]
    out_array[out_fld] = a0  # result goes here
    arcpy.da.ExtendTable(in_tbl, 'OBJECTID', out_array, 'IDs')
def _tool():
    """Run when the script is called from a tool."""
    in_tbl = sys.argv[1]
    in_flds = sys.argv[2]
    out_fld = sys.argv[3]
    if ';' in in_flds:
        in_flds = in_flds.split(';')
    else:
        in_flds = [in_flds]
    desc = arcpy.da.Describe(in_tbl)
    tbl_path = desc['path']
    fnames = [i.name for i in arcpy.ListFields(in_tbl)]
    if out_fld in fnames:
        out_fld += 'dup'
    out_fld = arcpy.ValidateFieldName(out_fld, tbl_path)
    args = [in_tbl, in_flds, out_fld, tbl_path]
    msg = "in_tbl {}\nin_fld {}\nout_fld {}\ntbl_path {}".format(*args)
    tweet(msg)
    oid = 'OBJECTID'
    vals = [oid] + in_flds
    arr = arcpy.da.TableToNumPyArray(in_tbl, vals)
    tweet("{!r:}".format(arr))
    arcpy.da.ExtendTable(in_tbl, 'OBJECTID', arr, 'OBJECTID')
def makeTextID(field, table):
    '''This function creates a copy of an existing field with the String format.

    **Description:**

        Certain types of fields cause problems when performing joins, and Strings are
        generally the most reliable. This function creates a new field with string
        format of length 30 and copies all data from the problem field.

    **Arguments:**

        * *field* - input arcpy field object
        * *table* - name with full path of input table to be modified

    **Returns:**

        * *textFieldName* - validated field name of added field.
    '''
    # Obtain a valid fieldname
    textFieldName = arcpy.ValidateFieldName("txt" + field.name, table)
    # Test for a schema lock
    if arcpy.TestSchemaLock(table):
        # Add the output text field
        arcpy.AddField_management(table, textFieldName, "TEXT", "#", "#", "30")
    else:
        arcpy.AddMessage("Unable to acquire the necessary schema lock to add the new field")
    # Calculate the field values
    arcpy.CalculateField_management(table, textFieldName, '!' + field.name + '!', "PYTHON")
    # Since this field will be used in joins, index the field.
    arcpy.AddIndex_management(table, textFieldName, "idIDX", "UNIQUE")
    return textFieldName
def __createField(self, field):
    """Helper function to create an individual field when running the
    createFeatureClass method."""
    name = field['name']
    fType = field['type']
    fieldLength = None
    if 'shape' in name.lower():
        return
    elif "String" in fType:
        fieldType = "TEXT"
        fieldLength = field['length']
    elif "Date" in fType:
        fieldType = "DATE"
    elif "SmallInteger" in fType:
        fieldType = "SHORT"
    elif "Integer" in fType:
        fieldType = "LONG"
    elif "Double" in fType:
        fieldType = "DOUBLE"
    elif "Single" in fType:
        fieldType = "FLOAT"
    else:
        fieldType = "Unknown"
    featureClass = self.featureClassLocation + "\\" + self.name
    validatedName = arcpy.ValidateFieldName(name, self.featureClassLocation)
    # Use the validated name, not the raw one, when adding the field
    arcpy.AddField_management(in_table=featureClass, field_name=validatedName,
                              field_type=fieldType, field_length=fieldLength)
def countCategoricalPointTypesWithinPolygons(fcPoint, pointFieldName, fcPolygon, workspace):
    '''Function to count the unique attributes in a point feature class field and append
    them as new fields in the polygon feature class, with the values set to the count of
    features, by type, in each particular polygon feature'''
    if arcpy.Exists(workspace):  # Check if input workspace is valid
        arcpy.env.workspace = workspace  # Set the workspace
        if arcpy.Exists(fcPoint) and arcpy.Exists(fcPolygon):
            try:
                # Gather unique names for features in the point FC
                features_set = set([row[0] for row in
                                    arcpy.da.SearchCursor(fcPoint, [pointFieldName])])
                # Iterate through each unique attribute
                for feature in features_set:
                    # Add a field to fcPolygon for this attribute
                    new_field = arcpy.ValidateFieldName(
                        "{}".format(re.sub(r'\W+', "", feature[:13])),
                        os.path.dirname(fcPolygon))
                    arcpy.management.AddField(fcPolygon, new_field, "SHORT")
                    # Filter and spatially join only the features that meet the pointFieldName criteria
                    where_clause = f"{pointFieldName} LIKE '%{feature}%'"
                    selection = arcpy.management.SelectLayerByAttribute(
                        fcPoint, "NEW_SELECTION", where_clause)
                    arcpy.analysis.SpatialJoin(fcPolygon, selection, "outfc",
                                               "", "", "", "CONTAINS")
                    # Update the count field with the number of points that joined to each polygon
                    with arcpy.da.UpdateCursor(fcPolygon, ["FIPS", new_field]) as up_curs:
                        for urow in up_curs:
                            with arcpy.da.SearchCursor("outfc", ["FIPS", "Join_Count"]) as search_curs:
                                for srow in search_curs:
                                    if urow[0] == srow[0]:
                                        urow[1] = srow[1]
                                        up_curs.updateRow(urow)
                    # Delete the temporary outfc
                    arcpy.management.Delete("outfc")
                    print("Added field counting {} in {}".format(
                        feature, arcpy.Describe(fcPolygon).baseName))
            except Exception as e:  # Catch errors in the field manipulation calculations
                print("Error in code execution", e)
                sys.exit(1)
        else:  # Input features are invalid
            print("Feature classes do not exist, check input feature classes")
            sys.exit(1)
    else:  # Input workspace is invalid
        print("Workspace does not exist, correct workspace input")
        sys.exit(1)
def add_Standarized_Fields(in_fc, input_Fields, ignore_nulls=True):
    """This function will take in a feature class, and use pandas/numpy to calculate
    Z-scores and then join them back to the feature class using arcpy."""
    try:
        arcpy.env.overwriteOutput = True
        desc = arcpy.Describe(in_fc)
        OIDFieldName = desc.OIDFieldName
        workspace = os.path.dirname(desc.catalogPath)
        input_Fields_List = input_Fields
        finalColumnList = []
        scored_df = None
        for column in input_Fields_List:
            try:
                field_series = san.arcgis_table_to_dataframe(in_fc, [column],
                                                             skip_nulls=ignore_nulls,
                                                             null_values=0)
                san.arc_print("Creating standardized column for field {0}.".format(str(column)), True)
                col_Standarized = arcpy.ValidateFieldName("Zscore_" + column, workspace)
                # Z-score: (x - mean) / population standard deviation (ddof=0)
                field_series[col_Standarized] = (field_series[column] - field_series[column].mean()) / \
                                                field_series[column].std(ddof=0)
                finalColumnList.append(col_Standarized)
                if col_Standarized != column:
                    del field_series[column]
                if scored_df is None:
                    scored_df = field_series
                else:
                    scored_df = pd.merge(scored_df, field_series, how="outer",
                                         left_index=True, right_index=True)
            except Exception as e:
                san.arc_print("Could not process field {0}".format(str(column)))
                san.arc_print(e.args[0])
        JoinField = arcpy.ValidateFieldName("DFIndexJoin", workspace)
        scored_df[JoinField] = scored_df.index
        finalColumnList.append(JoinField)
        san.arc_print("Exporting new standardized dataframe to structured numpy array.", True)
        finalStandardArray = scored_df.to_records()
        san.arc_print("Joining new standardized fields to feature class. "
                      "The new fields are {0}".format(str(finalColumnList)), True)
        arcpy.da.ExtendTable(in_fc, OIDFieldName, finalStandardArray, JoinField,
                             append_only=False)
        san.arc_print("Script Completed Successfully.", True)
    except arcpy.ExecuteError:
        san.arc_print(arcpy.GetMessages(2))
    except Exception as e:
        san.arc_print(e.args[0])
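# The standardization above is the ordinary z-score, z = (x - mean) / std, using the
# population standard deviation (ddof=0). A quick pandas-only check of the same math:
import pandas as pd
s = pd.Series([2.0, 4.0, 6.0])
z = (s - s.mean()) / s.std(ddof=0)  # -> [-1.2247..., 0.0, 1.2247...]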
def normalizeFieldList(keylist):
    print(keylist)
    # Convert the tag names into valid fieldnames (length 10 max): when a tag is 10
    # or more characters, the first 4 and the last 5 characters build the string.
    ff = (lambda s: (str(arcpy.ValidateFieldName(
        (s[0:4]) + (s[-5:]) if len(s) >= 10 else s))).upper())
    tag_fields = list(map(ff, keylist))
    print(tag_fields)
    return tag_fields
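# For example, a 12-character tag such as "description1" collapses to its first 4
# plus last 5 characters before validation and upper-casing:
# normalizeFieldList(["description1", "name"])  ->  ['DESCTION1', 'NAME']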
def _tool():
    """Run when the script is called from a tool."""
    in_tbl = sys.argv[1]
    in_fld = sys.argv[2]
    out_fld = sys.argv[3]
    all_punc = sys.argv[4]
    all_white = sys.argv[5]
    all_extra = sys.argv[6]
    all_others = sys.argv[7]
    a0 = [[], punc][all_punc in (True, 'True', 'true')]
    a1 = [[], whitesp][all_white in (True, 'True', 'true')]
    if len(all_others) == 1:
        a2 = list(all_others)
    elif len(all_others) > 1 and ";" in all_others:
        a2 = all_others.replace(";", "xx")
        a2 = a2.split('xx')[:-1]
    else:
        a2 = []
    strip_list = a0 + a1 + a2
    desc = arcpy.da.Describe(in_tbl)
    tbl_path = desc['path']
    is_gdb_tbl = tbl_path[-4:] == '.gdb'
    fnames = [i.name for i in arcpy.ListFields(in_tbl)]
    if out_fld in fnames:
        out_fld += 'dup'
    out_fld = arcpy.ValidateFieldName(out_fld, tbl_path)
    args = [in_tbl, in_fld, out_fld, tbl_path]
    msg = "in_tbl {}\nin_fld {}\nout_fld {}\ntbl_path {}".format(*args)
    tweet(msg)
    tweet("Removing .... {}".format(strip_list))
    oid = 'OBJECTID'
    vals = [oid, in_fld]
    # ---- do the work ----
    arr = arcpy.da.TableToNumPyArray(in_tbl, vals)
    tweet("{!r:}".format(arr))
    a0 = arr[in_fld]
    cleaned = clean_fld(a0, strip_list)  # punc
    if all_extra in (True, 'True', 'true'):
        # Collapse runs of multiple spaces down to single spaces
        sps = ['    ', '   ', '  ']
        for i in sps:
            cleaned = np.char.replace(cleaned, i, " ")
    sze = cleaned.dtype.str
    dt = [('IDs', '<i8'), (out_fld, sze)]
    out_array = np.empty((arr.shape[0],), dtype=dt)
    out_array['IDs'] = np.arange(1, arr.shape[0] + 1)
    out_array[out_fld] = cleaned
    arcpy.da.ExtendTable(in_tbl, 'OBJECTID', out_array, 'IDs')
def OSMtoShape(self, outFC):
    # Create the output feature class in WGS84
    # outFC = os.path.join(arcpy.env.workspace, arcpy.ValidateTableName("OSM"))
    if self.elem == "node":
        fc = 'POINT'
        res = self.result.nodes
    elif self.elem == "area":
        fc = 'POLYGON'
        res = self.result.ways
    elif self.elem == "line":
        fc = 'POLYLINE'
        res = self.result.ways
    arcpy.CreateFeatureclass_management(os.path.dirname(outFC), os.path.basename(outFC),
                                        fc, '', '', '', self.rs)
    # Join fields to the feature class, using ExtendTable
    tag_list = list(self.tag_set)
    tag_fields = [str(arcpy.ValidateFieldName(s)) for s in tag_list]
    print(tag_fields)
    field_array = [
        ('intfield', numpy.int32),
        ('Name_d', '|S255'),
        ('Value_d', '|S255'),
        ('Key_d', '|S255'),
    ]
    for f in tag_fields:
        field_array.append((f, '|S255'))
    print(field_array)
    inarray = numpy.array([], numpy.dtype(field_array))
    arcpy.da.ExtendTable(outFC, "OID@", inarray, "intfield")
    field_list = ['Name_d', 'Value_d', 'Key_d', 'SHAPE@']
    field_list.extend(tag_fields)
    print(field_list)
    rowsDA = arcpy.da.InsertCursor(outFC, field_list)
    # arcpy.SetProgressor('step', 'Converting GPX points...', 0, howManyElements, 1)
    # Loop over each element in the tree and insert a new row:
    # geometries and attributes are inserted together.
    for element in res:
        geom = self.createGeometry(element)
        f = lambda tag: element.tags.get(tag, "n/a")
        tag_values = [f(tag) for tag in tag_list]
        l = [element.tags.get("name", "n/a"),
             element.tags.get(self.key, "n/a"),
             self.key,
             geom]
        l.extend(tag_values)
        try:
            rowsDA.insertRow(l)
        except RuntimeError as e:
            arcpy.AddError(str(e))
def value_process(path):
    # Add the transformation variable field
    fieldName0 = arcpy.ValidateFieldName("Tvariable")
    arcpy.AddField_management(path, fieldName0, "TEXT", "", "", 50)
    # Initialize the transformation variable to an empty string
    arcpy.CalculateField_management(path, fieldName0, '""', "PYTHON_9.3")
    name = arcpy.Describe(path).name.strip(".shp")
    if name == duplicated_name("Income_DA", 0):
        u0 = value_calculate(path, "Income_DA", 0, 1, "AVERAGE_HO")
        cursor = arcpy.da.UpdateCursor(path, ["POPULATION", "AVERAGE_HO", "Tvariable"])
        for row in cursor:
            if row[0] == 0:
                cursor.deleteRow()
            if row[0] == -9999:
                row[1] = u0
                row[2] = "N/A"
                cursor.updateRow(row)
        del row
        del cursor
    elif name == duplicated_name("Labour_DA", 0):
        u0 = value_calculate(path, "Labour_DA", 0, 1, "IN_THE_LAB")
        u1 = value_calculate(path, "Labour_DA", 0, 2, "UNEMPLOYED")
        cursor = arcpy.da.UpdateCursor(path, ["IN_THE_LAB", "UNEMPLOYED", "Tvariable"])
        for row in cursor:
            if row[0] == 0:
                cursor.deleteRow()
            if row[0] == -9999:
                row[0] = u0
                cursor.updateRow(row)
            if row[1] == -9999:
                row[1] = u1
                row[2] = "N/A"
                cursor.updateRow(row)
        del row
        del cursor
    elif name == duplicated_name("Marital_DA", 0):
        u0 = value_calculate(path, "Marital_DA", 0, 1, "TOTAL_NU8")
        u1 = value_calculate(path, "Marital_DA", 0, 2, "PERIOD_OF_")
        u2 = value_calculate(path, "Marital_DA", 0, 3, "PERIOD_OF1")
        cursor = arcpy.da.UpdateCursor(path, ["TOTAL_NU8", "PERIOD_OF_", "PERIOD_OF1", "Tvariable"])
        for row in cursor:
            if row[0] == 0:
                cursor.deleteRow()
            if row[0] == -9999:
                row[0] = u0
                cursor.updateRow(row)
            if row[1] == -9999 or row[2] == -9999:
                row[1] = u1
                row[2] = u2
                row[3] = "N/A"
                cursor.updateRow(row)
        del row
        del cursor
def clean_time_window_prefix_strings(param_value_table, prefix_field_idx, out_gdb):
    """Clean the user-specified time window field prefixes to ensure good field names
    in the output."""
    if param_value_table.altered and out_gdb:
        values = param_value_table.values
        for idx, value in enumerate(values):
            prefix = value[prefix_field_idx]
            values[idx][prefix_field_idx] = arcpy.ValidateFieldName(prefix, out_gdb)
        param_value_table.values = values
def validate_df_names(dataframe, output_feature_class_workspace):
    """Returns a pandas dataframe with all column names renamed to be valid ArcGIS
    field names."""
    new_name_list = []
    old_names = dataframe.columns
    for name in old_names:
        new_name = arcpy.ValidateFieldName(name, output_feature_class_workspace)
        new_name_list.append(new_name)
    rename_dict = {i: j for i, j in zip(old_names, new_name_list)}
    # rename returns a new dataframe; capture it so the renames are kept
    dataframe = dataframe.rename(index=str, columns=rename_dict)
    return dataframe
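# A small sketch of validate_df_names in use (the workspace path is hypothetical);
# ValidateFieldName replaces characters that are illegal in field names with
# underscores, so columns like "temp (C)" come back geodatabase-safe.
# import pandas as pd
# df = pd.DataFrame({"temp (C)": [20.5, 21.0], "site name": ["a", "b"]})
# df = validate_df_names(df, r"C:\data\outputs.gdb")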
def add_footprints_attribute(pathdab, infile, list_polygon_footprints, absolute_DTM_paths):
    # Define paths and workspace (the gdb needs to exist at this point)
    env.workspace = env.scratchWorkspace = pathdab
    fieldname1 = arcpy.ValidateFieldName("DTM_name")
    fieldname2 = arcpy.ValidateFieldName("abspath")
    # arcpy.AddField_management(infile, fieldname1, "TEXT", "", "", 60)
    arcpy.AddField_management(infile, fieldname2, "TEXT", "", "", 90)
    with arcpy.da.UpdateCursor(infile, [fieldname1, fieldname2]) as cursor:
        ix = 0
        for row in cursor:
            print(list_polygon_footprints[ix])
            row[0] = list_polygon_footprints[ix]
            row[1] = absolute_DTM_paths[ix]
            cursor.updateRow(row)
            ix = ix + 1
    print("DONE")
def createOutput(self, outputTable):
    """Creates Moran's I Step Output Table.

    INPUTS
    outputTable (str): path to the output table
    """
    #### Allow Overwrite Output ####
    ARCPY.env.overwriteOutput = 1

    #### Get Output Table Name With Extension if Appropriate ####
    outputTable, dbf = UTILS.returnTableName(outputTable)

    #### Set Progressor ####
    ARCPY.SetProgressor("default", ARCPY.GetIDMessage(84008))

    #### Delete Table If Exists ####
    UTILS.passiveDelete(outputTable)

    #### Create Table ####
    outPath, outName = OS.path.split(outputTable)
    try:
        DM.CreateTable(outPath, outName)
    except:
        ARCPY.AddIDMessage("ERROR", 541)
        raise SystemExit()

    #### Add Result Fields ####
    self.outputFields = []
    for field in iaFieldNames:
        fieldOut = ARCPY.ValidateFieldName(field, outPath)
        UTILS.addEmptyField(outputTable, fieldOut, "DOUBLE")
        self.outputFields.append(fieldOut)

    #### Create Insert Cursor ####
    try:
        insert = DA.InsertCursor(outputTable, self.outputFields)
    except:
        ARCPY.AddIDMessage("ERROR", 204)
        raise SystemExit()

    #### Add Rows to Output Table ####
    for testIter in range(self.nIncrements):
        insert.insertRow(self.giResults[testIter])

    #### Clean Up ####
    del insert

    return outputTable, dbf
def form_output(in_tbl, in_arr, out_fld="Result_", del_fld=True, vals=None,
                idx=0, xtend=False):
    """Form the output table given a field name and join field.

    Requires:
    ---------
    in_tbl : input table
    in_arr : array derived from the table; must contain the OBJECTID field
    out_fld : desired output field name
    del_fld : delete the output field first if it already exists
    vals : values for the output field
    idx : index to start values from... usually 0 or 1 (ie for sequential)
    xtend : if True, join the result back to the table with ExtendTable
    """
    desc = arcpy.da.Describe(in_tbl)
    tbl_path = desc['path']
    oid_fld = desc['OIDFieldName']  # usually 'OBJECTID'
    fnames = [i.name for i in arcpy.ListFields(in_tbl)]
    if del_fld in ('True', 'true', True, 1):
        del_fld = True
    else:
        del_fld = False
    if out_fld in fnames:
        if del_fld:
            arcpy.DeleteField_management(in_tbl, out_fld)
            tweet("\nDeleting field {}".format(out_fld))
        else:
            out_fld += 'dup'
    out_fld = arcpy.ValidateFieldName(out_fld, tbl_path)
    sze = vals.dtype.str
    dt = [('IDs', '<i4'), (out_fld, sze)]  # ie '<f8'
    out_array = np.zeros((in_arr.shape[0],), dtype=dt)
    out_array['IDs'] = in_arr[oid_fld]
    out_array[out_fld][idx:] = vals
    if xtend:
        arcpy.da.ExtendTable(in_tbl, oid_fld, out_array, 'IDs')
    return out_array
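# A hedged example of driving form_output end to end (table path and field names
# are hypothetical): read the table, derive a value per row, then extend in place.
# in_arr = arcpy.da.TableToNumPyArray(in_tbl, ['OBJECTID', 'Value'])
# vals = in_arr['Value'] * 2.0
# out = form_output(in_tbl, in_arr, out_fld='Doubled', del_fld=True,
#                   vals=vals, idx=0, xtend=True)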
def countPointsByTypeWithinPolygon(input_geodatabase, fcPoint, pointFieldName,
                                   pointFieldValue, fcPolygon):
    '''This function counts the number of point features of a given type within a
    polygon and returns the count of features in a new field in the polygon'''
    if arcpy.Exists(input_geodatabase):  # Check if input workspace is valid
        arcpy.env.workspace = input_geodatabase  # Set workspace to input_geodatabase
        if arcpy.Exists(fcPoint) and arcpy.Exists(fcPolygon):
            try:
                # Create a new field to hold the count of features
                count_field = arcpy.ValidateFieldName("Pnt_count", os.path.dirname(fcPolygon))
                arcpy.management.AddField(fcPolygon, count_field, "SHORT")
                # Filter and spatially join only the features that meet the pointFieldName criteria
                where_clause = f"{pointFieldName} LIKE '%{pointFieldValue}%'"
                selection = arcpy.management.SelectLayerByAttribute(
                    fcPoint, "NEW_SELECTION", where_clause)
                arcpy.analysis.SpatialJoin(fcPolygon, selection, "outfc",
                                           "", "", "", "CONTAINS")
                # Update the Pnt_count field with the count of elements that joined in the spatial join
                with arcpy.da.UpdateCursor(fcPolygon, ["FIPS", count_field]) as up_curs:
                    for urow in up_curs:
                        with arcpy.da.SearchCursor("outfc", ["FIPS", "Join_Count"]) as search_curs:
                            for srow in search_curs:
                                if urow[0] == srow[0]:
                                    urow[1] = srow[1]
                                    up_curs.updateRow(urow)
                print("Added field counting {} numbers in {}".format(
                    pointFieldValue, arcpy.Describe(fcPolygon).baseName))
                # Delete the temporary outfc
                arcpy.management.Delete("outfc")
            except Exception as e:  # Catch errors in the field manipulation calculations
                print("Error in code execution", e)
                sys.exit(1)
        else:  # Input features are invalid
            print("Feature classes do not exist, check input feature classes")
            sys.exit(1)
    else:  # Input workspace is invalid
        print("Workspace does not exist, correct input_geodatabase")
        sys.exit(1)
def calculateDensity(fcpolygon, attribute, geodatabase="assignment2.gdb"):
    arcpy.env.overwriteOutput = True
    if arcpy.Exists(geodatabase):  # Check if input workspace is valid
        arcpy.env.workspace = geodatabase  # Set workspace to the geodatabase
    else:  # Input workspace is invalid
        print("Workspace does not exist, correct input_geodatabase")
        sys.exit(1)
    try:
        desc = arcpy.Describe(fcpolygon)  # Describe the fcpolygon for multiple uses
        if desc.spatialReference.type == "Projected":  # Check CRS type
            if desc.shapeType == 'Polygon':
                fields = [field.name for field in arcpy.ListFields(fcpolygon)]
                if attribute in fields:
                    # Add a field for area in square miles
                    arcpy.management.AddGeometryAttributes(fcpolygon, 'AREA', "", 'SQUARE_MILES_US')
                    # Add an empty field for density per square mile
                    new_field = arcpy.ValidateFieldName("density_sqm", os.path.dirname(fcpolygon))
                    arcpy.management.AddField(fcpolygon, new_field, "DOUBLE")
                    print('Added field ', new_field)
                    # Divide the attribute by the area to get density
                    arcpy.management.CalculateField(fcpolygon, new_field, f"!{attribute}!/!POLY_AREA!")
                    print('Density calculated')
                else:  # Exit if the attribute is not a field on the polygon FC
                    print('Invalid attribute name')
                    sys.exit(1)
            else:
                print(f"Incorrect shape type, {fcpolygon} is a {desc.shapeType}")
        else:
            print(f"{desc.baseName} is not in a projected CRS")
            sys.exit(1)
    except Exception as e:
        print(e)
        sys.exit(1)
def validate(out_table, fieldnames):
    """Validates input csv or excel field names based on the output table's requirements."""
    directory = os.path.dirname(out_table)
    out_name = os.path.basename(out_table)
    wspaceType = arcpy.Describe(directory).workspaceType
    # Start a list of fields within the new table. The OID and default fields
    # differ depending on the table type.
    if wspaceType == "FileSystem":
        if out_name.endswith('.dbf'):
            tempFields = ['objectid', 'field1']
        else:
            tempFields = ['rowid', 'objectid', 'field1']
    else:
        tempFields = ['objectid']
    initialList = tempFields[:]
    for i in range(0, len(fieldnames)):
        # Validate fieldnames
        if fieldnames[i] == "" or fieldnames[i] is None:
            new_field = "field_{0}".format(i + 1)
        else:
            new_field = arcpy.ValidateFieldName(fieldnames[i], directory)
        if new_field.lower() in initialList:
            if wspaceType == 'FileSystem':
                new_field = "{0}_{1}".format(new_field[:6], i + 1)
            else:
                new_field = "{0}_{1}".format(new_field, i + 1)
        # If the fieldname has been changed from its original name (through
        # validation or to prevent identical names), add a message, and change
        # the name in the fieldname list to the new name (used in the table)
        if new_field != fieldnames[i]:
            arcpy.AddWarning("  Field '{0}' is now '{1}'".format(fieldnames[i], new_field))
            fieldnames[i] = new_field
        # Track names in lowercase so the duplicate check is case-insensitive
        initialList.append(new_field.lower())
    # Remove the OID field (and any default fields) from the list of fields,
    # which was only used to ensure no identical fields were added
    for field in tempFields:
        initialList.remove(field)
    return initialList
def countObservationsWithinDistance(fcPoint, distance, distanceUnit, geodatabase="assignment2.gdb"):
    arcpy.env.overwriteOutput = True
    if arcpy.Exists(geodatabase):  # Check if input workspace is valid
        arcpy.env.workspace = geodatabase  # Set workspace to the geodatabase
    else:  # Input workspace is invalid
        print("Workspace does not exist, correct input_geodatabase")
        sys.exit(1)
    # Add the count field to fcPoint
    try:
        new_field = arcpy.ValidateFieldName("count", os.path.dirname(fcPoint))
        arcpy.management.AddField(fcPoint, new_field, "DOUBLE")
        print('Adding field ', new_field)
    except Exception as e:
        print(e, "Error adding field to fcPoint")
        sys.exit(1)
    # Count neighbors for each point with an update cursor
    try:
        desc = arcpy.Describe(fcPoint)
        if desc.spatialReference.type == 'Geographic':
            distance_type = "WITHIN_A_DISTANCE_GEODESIC"
            print('Calculating geodesic distance...')
        else:
            distance_type = "WITHIN_A_DISTANCE"
            print("Calculating Euclidean distance...")
        with arcpy.da.UpdateCursor(fcPoint, ["OBJECTID", new_field]) as cursor:
            for row in cursor:
                # Select the current point, then every point within the distance of
                # it; keep the layers the selection tools return so the selections chain
                target = arcpy.management.SelectLayerByAttribute(
                    fcPoint, "NEW_SELECTION", f"OBJECTID = {row[0]}")
                nearby = arcpy.management.SelectLayerByLocation(
                    fcPoint, distance_type, target,
                    f"{distance} {distanceUnit}", "NEW_SELECTION")
                count = arcpy.management.GetCount(nearby)
                row[1] = int(count[0])
                cursor.updateRow(row)
        print('Processing Complete')
    except Exception as e:
        print(e, "Error with selection operation.")
        sys.exit(1)
def chained_scoring_func(in_fc, scoring_fields, threshold_upper, threshold_lower=0,
                         if_less_score=1, if_more_score=0):
    """This tool will score fields based on an upper and lower bound threshold, and
    return values to those fields based on whether each value falls inside or outside
    the threshold band. All fields are treated the same."""
    try:
        arcpy.env.overwriteOutput = True
        desc_in_fc = arcpy.Describe(in_fc)
        # ValidateFieldName expects the workspace, not the layer path itself
        workspace = os.path.dirname(desc_in_fc.catalogPath)
        fields_list = scoring_fields
        new_score_fields = [
            arcpy.ValidateFieldName(
                "SCORE_{0}".format(str(i).replace("DIST_", "", 1).replace("ANGLE_", "", 1)),
                workspace)
            for i in fields_list]
        arc_print("Adding and Computing Score Fields.", True)
        for field_to_score, new_score in zip(fields_list, new_score_fields):
            add_new_field(in_fc, new_score, "DOUBLE", field_alias=new_score)
            arc_print("Computing score for field {0}. Returning {1} if value <= {2} "
                      "and >= {3}, and {4} otherwise.".format(
                          str(new_score), str(if_less_score), str(threshold_upper),
                          str(threshold_lower), str(if_more_score)), True)
            try:
                with arcpy.da.UpdateCursor(in_fc, [field_to_score, new_score]) as cursor:
                    for row in cursor:
                        row[1] = score_value(row[0], threshold_upper, threshold_lower,
                                             if_less_score, if_more_score)
                        cursor.updateRow(row)
            except Exception:
                arc_print("Could not process field {0}".format(new_score))
    except Exception as e:
        arc_print(str(e.args[0]))
        print(e.args[0])
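# chained_scoring_func relies on a score_value helper that is not shown above; a
# minimal sketch consistent with the message text (return if_less_score when the
# value falls inside the closed threshold band, if_more_score otherwise) could be:
def score_value(value, threshold_upper, threshold_lower, if_less_score, if_more_score):
    """Score a single value against the [threshold_lower, threshold_upper] band."""
    if value is not None and threshold_lower <= value <= threshold_upper:
        return if_less_score
    return if_more_score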
def add_dll_defined_fields_to_table(dll, calculation, table, overwrite):
    global outlength, names
    arcpy.AddMessage('  Overwrite output = %s' % overwrite)
    # Get output names from the dll
    dll.calc_get_all_output_names.restype = ctypes.POINTER(ctypes.c_char_p)
    dll.calc_get_short_output_names.restype = ctypes.POINTER(ctypes.c_char_p)
    dll.calc_get_output_length.restype = ctypes.c_int
    outlength = dll.calc_get_output_length(calculation)
    alii = list(dll.calc_get_all_output_names(calculation)[0:outlength])
    names = list(dll.calc_get_short_output_names(calculation)[0:outlength])
    # Ensure names are valid for the table type
    names = [arcpy.ValidateFieldName(x, os.path.dirname(table)) for x in names]
    # Check fields won't be overwritten unless specified
    existing_field_names = [x.name for x in arcpy.ListFields(table)]
    if not overwrite:
        error_happened = False
        for name, alias in zip(names, alii):
            if name in existing_field_names:
                arcpy.AddError('Field %s (%s) exists already' % (name, alias))
                error_happened = True
        if error_happened:
            arcpy.AddError("Either enable 'Overwrite output fields' in the tool dialog box\n"
                           "Or delete/rename the existing fields")
            raise RuntimeError("Can't overwrite output data")
    arcpy.SetProgressor("step", "Checking output columns", 0, outlength, 1)
    # Create fields if needed
    for i, (name, alias) in enumerate(zip(names, alii)):
        arcpy.SetProgressorPosition(i)
        if name not in [x.name for x in arcpy.ListFields(table)]:
            arcpy.AddMessage('  Field %s (%s) not present, adding' % (name, alias))
            arcpy.AddField_management(table, name, 'FLOAT', field_alias=alias)
        else:
            arcpy.AddMessage('  Field %s (%s) exists already, overwriting' % (name, alias))
    arcpy.SetProgressorPosition(outlength)
def calculateAreas(inputFC, outputFC):
    """Creates a new feature class from the input polygon feature class and adds a
    field that includes the area of the polygons.

    INPUTS:
    inputFC (str): path to the input feature class
    outputFC (str): path to the output feature class
    """
    #### Validate Output Workspace ####
    ERROR.checkOutputPath(outputFC)
    outPath, outName = OS.path.split(outputFC)

    #### Create SSDataObject ####
    ssdo = SSDO.SSDataObject(inputFC, templateFC=outputFC, useChordal=False)

    #### Assure Polygon FC ####
    if ssdo.shapeType.lower() != "polygon":
        ARCPY.AddIDMessage("ERROR", 931)
        raise SystemExit()

    #### Check Number of Observations ####
    cnt = UTILS.getCount(inputFC)
    ERROR.errorNumberOfObs(cnt, minNumObs=1)

    #### Copy Features ####
    try:
        clearCopy = UTILS.clearExtent(DM.CopyFeatures)
        clearCopy(inputFC, outputFC)
    except:
        ARCPY.AddIDMessage("ERROR", 210, outputFC)
        raise SystemExit()

    #### Add Area Field ####
    areaFieldNameOut = ARCPY.ValidateFieldName(areaFieldName, outPath)
    if areaFieldNameOut not in ssdo.allFields:
        UTILS.addEmptyField(outputFC, areaFieldNameOut, "DOUBLE")

    #### Calculate Field ####
    clearCalc = UTILS.clearExtent(DM.CalculateField)
    clearCalc(outputFC, areaFieldNameOut, "!shape.area!", "PYTHON_9.3")
def add_multiple_fields_to_table(table, idfield, names, alii, datatype, arcid_to_data_function):
    names = [arcpy.ValidateFieldName(x, os.path.dirname(table)) for x in names]
    arcpy.AddMessage('adding fields: ' + ','.join(names))
    for name, alias in zip(names, alii):
        if name not in [x.name for x in arcpy.ListFields(table)]:
            arcpy.AddField_management(table, name, datatype, field_alias=alias)
        else:
            arcpy.AddWarning('field %s exists already, overwriting' % name)
    arcpy.AddMessage('added fields... populating now...')
    rows = arcpy.UpdateCursor(table)
    for row in rows:
        values = arcid_to_data_function(row.getValue(idfield))
        for i, name in enumerate(names):
            row.setValue(name, values[i])
        rows.updateRow(row)
    del row
    del rows
    arcpy.AddMessage('...done')
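# Usage sketch: arcid_to_data_function maps a row's ID to one value per new field
# (the field names and lookup data below are hypothetical).
# lookup = {1: (0.25, 0.75), 2: (0.10, 0.90)}
# add_multiple_fields_to_table(table, 'ARCID', ['ratio_a', 'ratio_b'],
#                              ['Ratio A', 'Ratio B'], 'FLOAT',
#                              lambda arcid: lookup[arcid])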
def value_calculate(path, name, x, y, fieldName, fieldName_, expression):
    # Add the new field and run the field calculator on it
    fieldName1 = arcpy.ValidateFieldName(fieldName_)
    arcpy.AddField_management(path, fieldName1, "DOUBLE", "", "", 50)
    arcpy.CalculateField_management(path, fieldName1, expression, "PYTHON_9.3")
    # Build the output feature class name (duplicated_name1 is defined elsewhere)
    fcName = duplicated_name1(name, x, y)
    # Copy the features to the output folder
    arcpy.CopyFeatures_management(path, outputFolder + "\\" + fcName)
    path1 = outputFolder + "\\" + fcName + ".shp"
    # Drop rows flagged with the -9999 no-data value before averaging
    cursor = arcpy.da.UpdateCursor(path1, [fieldName])
    for row in cursor:
        if row[0] == -9999:
            cursor.deleteRow()
    del row
    del cursor
    # Compute the mean of the remaining values, then remove the temporary copy
    statsfield = arcpy.da.FeatureClassToNumPyArray(path1, fieldName)
    u = statsfield[fieldName].mean()
    arcpy.Delete_management(path1)
    return u