def listFields(featureClass):
    fields = ListFields(featureClass)
    out = dict()
    for fld in fields:
        # Skip auto-managed geometry measure fields and qualified (dotted) field names.
        if (fld.name.lower() not in ('shape_length', 'shape_area', 'shape.len',
                                     'shape.length', 'shape_len', 'shape.area')
                and fld.name.find(".") == -1):
            out[fld.name] = fld.type
    return out
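# A minimal usage sketch for listFields. The geodatabase path is a placeholder,
# not from the original scripts; ListFields is assumed to already be imported
# from arcpy, as the function itself assumes.
sample_fc = r"C:\data\sample.gdb\roads"  # hypothetical feature class
for field_name, field_type in listFields(sample_fc).items():
    print("{0}: {1}".format(field_name, field_type))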
def addMissingFields(fcToAddFieldsTo):
    currentFieldObjectsList = ListFields(fcToAddFieldsTo)
    currentFieldNames = [x.name for x in currentFieldObjectsList]
    for fieldToCheckForAndAddItem in fieldsToCheckForAndAdd:
        if fieldToCheckForAndAddItem not in currentFieldNames:
            if fieldToCheckForAndAddItem == n1RouteId:
                routeIDFieldName = fieldToCheckForAndAddItem
                routeIDFieldType = "TEXT"
                routeIDFieldLength = 50
                routeIDFieldAlias = routeIDFieldName
                AddField_management(fcToAddFieldsTo, routeIDFieldName,
                                    routeIDFieldType, "", "", routeIDFieldLength,
                                    routeIDFieldAlias, nullable)
            else:
                routeMeasureFieldName = fieldToCheckForAndAddItem
                routeMeasureFieldType = "DOUBLE"
                routeMeasureFieldPrecision = 38
                routeMeasureFieldScale = 8
                # Alias the measure field after itself, not after the route ID
                # field (which may not have been assigned yet in this loop).
                routeMeasureFieldAlias = routeMeasureFieldName
                AddField_management(fcToAddFieldsTo, routeMeasureFieldName,
                                    routeMeasureFieldType,
                                    routeMeasureFieldPrecision,
                                    routeMeasureFieldScale, "",
                                    routeMeasureFieldAlias, nullable)
def ExamineGDB(gdb):
    import ntpath
    from arcpy import (env, ListDatasets, ListTables, ListFeatureClasses,
                       GetCount_management, Compact_management, ListFields)
    reviewpath = ntpath.basename(gdb)
    # Set the workspace to the geodatabase's parent folder, then to the gdb itself.
    env.workspace = ntpath.dirname(gdb)
    ng911 = gdb
    print "geodatabases"
    print ng911
    env.workspace = ng911
    datasets = ListDatasets()
    print "Datasets:"
    for dataset in datasets:
        print " " + str(dataset)
    tables = ListTables()
    print " tables:"
    for table in tables:
        fcc = GetCount_management(table)
        print " " + str(table)
    fd = datasets[0]
    fcs = ListFeatureClasses("", "", fd)
    for fc in fcs:
        fields = ListFields(fc)
        fcc = GetCount_management(fc)
        print fc + ", " + str(fcc) + " features"
        for field in fields:
            print " " + str(field.name) + ", " + str(field.type)
    checkfile = reviewpath + "/" + ntpath.basename(ng911)  # currently unused
    topo = fd + "/NG911_Topology"  # currently unused
    Compact_management(ng911)
def extract_attachments(att_table, out_folder, att_field='file_name'):
    fields = ['DATA', 'ATT_NAME', 'ATTACHMENTID', att_field]
    # check for existence of required fields
    has_fields = [f.name for f in ListFields(att_table)]
    for f in fields:
        if f not in has_fields:
            AddError('Field {} is required in attribute table'.format(f))
    # verify path
    verify_path_exists(out_folder)
    with UpdateCursor(att_table, fields) as cursor:
        for row in cursor:
            # get the attachment file and create a filename
            attachment = row[0]
            filename = 'ATT_{2}_{1}'.format(*row)
            # write the output file and update the row's value to the file name
            open(join(out_folder, filename), 'wb').write(attachment.tobytes())
            row[3] = filename
            cursor.updateRow(row)
    # cleanup
    del row
    del filename
    del attachment
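# A hedged usage sketch for extract_attachments. The attachment table path and
# output folder are placeholders; ListFields, AddError, UpdateCursor (from
# arcpy.da), join (from os.path), and the verify_path_exists helper are assumed
# to be in scope, just as the function itself assumes.
att_table = r"C:\data\inspections.gdb\photos__ATTACH"  # hypothetical table
extract_attachments(att_table, r"C:\exports\photos", att_field='file_name')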
def TrackEditsAndVersion():
    env.workspace = OracleDB
    fclist = ListFeatureClasses()
    for fc in fclist:
        print fc
        if str(fc)[-2:] == 'MV':
            print "no actions taken on a Materialized View"
        else:
            if ListFields(fc, "GlobalID"):
                print "GlobalID Field already added"
            else:
                AddField_management(OracleDB + "/" + fc, "GlobalID", "GUID",
                                    "#", "#", "#", "GlobalID",
                                    "NON_NULLABLE", "REQUIRED")
                AddField_management(OracleDB + "/" + fc, "START_DATE", "DATE",
                                    "#", "#", "#", "Start_Date",
                                    "NULLABLE", "NON_REQUIRED")
                AddField_management(OracleDB + "/" + fc, "END_DATE", "DATE",
                                    "#", "#", "#", "End_Date",
                                    "NULLABLE", "NON_REQUIRED")
                EnableEditorTracking_management(OracleDB + "/" + fc,
                                                creator_field="Creator",
                                                creation_date_field="Created",
                                                last_editor_field="Editor",
                                                last_edit_date_field="Edited",
                                                add_fields="ADD_FIELDS",
                                                record_dates_in="UTC")
                RegisterAsVersioned_management(OracleDB + "/" + fc,
                                               "NO_EDITS_TO_BASE")
def tbl_arr(pth):
    """Convert a featureclass/table to a structured ndarray.

    Requires
    --------
    pth : string
        path to input featureclass or table
    """
    flds = ListFields(pth)
    nulls = null_dict(flds)
    # Exclude OID/Geometry field *types*; the geometry measure fields have to
    # be filtered by *name*, since their field type is Double.
    bad_types = ['OID', 'Geometry']
    bad_names = ['Shape_Length', 'Shape_Area']
    f0 = ["OID@"]
    f1 = [i.name for i in flds
          if i.type not in bad_types and i.name not in bad_names]
    flds = f0 + f1
    a = TableToNumPyArray(pth, field_names=flds, skip_nulls=False,
                          null_value=nulls)
    dt = np.array(a.dtype.descr)
    nmes = dt[:, 0]
    sze = dt[:, 1]
    cleaned = []
    for i in nmes:
        i = de_punc(i)  # run de_punc to remove punctuation
        cleaned.append(i)
    a.dtype = list(zip(cleaned, sze))
    return a
def fld_info(in_fc, prn=False):
    """Field information for a featureclass (in_fc).

    Parameters:
    -----------
    prn : boolean
        True - prints the results and returns None
        False - returns the values

    Field properties:
    -----------------
    'aliasName', 'baseName', 'defaultValue', 'domain', 'editable',
    'isNullable', 'length', 'name', 'precision', 'required', 'scale', 'type'
    """
    flds = ListFields(in_fc)
    f_info = [(i.name, i.type, i.length, i.isNullable, i.required)
              for i in flds]
    f = "{!s:<14}{!s:<12}{!s:>7} {!s:<10}{!s:<10}"
    if prn:
        frmt = "FeatureClass:\n {}\n".format(in_fc)
        args = ["Name", "Type", "Length", "Nullable", "Required"]
        frmt += f.format(*args) + "\n"
        frmt += "\n".join([f.format(*i) for i in f_info])
        tweet(frmt)
        return None
    return f_info
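# A usage sketch for fld_info; the feature class path is a placeholder and
# tweet() is assumed to be the messaging helper used elsewhere in these
# scripts.
info = fld_info(r"C:\data\sample.gdb\parcels")      # returns the tuples
fld_info(r"C:\data\sample.gdb\parcels", prn=True)   # prints a formatted table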
def add_iso_attributes():
    """Append attributes from the original MAX stops data to the isochrones
    feature class, matching features on their stop id fields (which are the
    'stop_id' and 'name' fields)."""
    rail_stop_dict = dict()
    s_fields = [ID_FIELD, STOP_FIELD, ROUTES_FIELD, ZONE_FIELD, YEAR_FIELD]
    with SearchCursor(MAX_STOPS, s_fields) as s_cursor:
        sid_ix = s_cursor.fields.index(ID_FIELD)
        for row in s_cursor:
            stop_id = row[sid_ix]
            rail_stop_dict[stop_id] = list(row)
    # area value will be used to check for errors in isochrone creation
    iso_fields = [f.name for f in ListFields(ISOCHRONES)]
    area_field = 'area'
    if area_field not in iso_fields:
        f_type = 'DOUBLE'
        AddField(ISOCHRONES, area_field, f_type)
    area_val = 'SHAPE@AREA'
    u_fields = s_fields + [area_field, area_val]
    with UpdateCursor(ISOCHRONES, u_fields) as u_cursor:
        sid_ix = u_cursor.fields.index(ID_FIELD)
        val_ix = u_cursor.fields.index(area_val)
        for row in u_cursor:
            stop_id = row[sid_ix]
            area = row[val_ix]
            i_row = rail_stop_dict[stop_id]
            i_row.extend([area, area])
            u_cursor.updateRow(i_row)
def __check_exist_in_db(self, rpuid, fc_list):
    """Checks if the rpuid exists in the given database.

    :type fc_list: list
    """
    try:
        new_fc_list = list(fc_list)
        for fc in self.feature_classes:
            if new_fc_list:
                fc_name = fc.split('.')[-1]
                if fc_name in new_fc_list or fc_name[:-2] in new_fc_list:
                    try:
                        new_fc_list.remove(fc_name)
                    except ValueError:
                        try:
                            new_fc_list.remove(fc_name[:-2])
                        except ValueError:
                            pass
                    fields = [f.name for f in ListFields(fc)]
                    if "rpuid" in fields:
                        rpauid_f = AddFieldDelimiters(fc, "rpuid")
                        with SearchCursor(fc, ["rpuid"],
                                          where_clause="{0}='{1}'".format(
                                              rpauid_f, str(rpuid))) as c:
                            for _ in c:
                                return True
        return False
    except Exception as e:
        self.log.exception(e.message)
        raise Exit()
def show(self):
    print("\n")
    print("show first 5 table rows for file:",
          '\x1b[1;36m' + self.filepath + '\x1b[0m')  # ANSI colors
    field_names = []
    for field in ListFields(self.filepath):
        field_names.append(field.name)
    field_names.remove("Shape")
    header = []
    for field in field_names:
        header.append(str('{:_^20}'.format(field)))
    print(header)
    count = 1
    with SearchCursor(self.filepath, field_names) as cursor:
        line = []
        for row in cursor:
            for col in row:
                line.append(str('{:^20}'.format(col)))
            print(line)
            line = []
            if count >= 5:
                break
            count += 1
    del cursor
def ListFieldNames(item):
    # create a list of field names
    fields = ListFields(item)
    fieldList = []
    for f in fields:
        fieldList.append(f.name.upper())
    return fieldList
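# A small sketch showing ListFieldNames used for a case-insensitive field
# check; the path and field name below are placeholders.
if "ROUTE_ID" in ListFieldNames(r"C:\data\sample.gdb\roads"):
    print("ROUTE_ID field is present.")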
def OffsetDirectionMatrix(gdb):
    # Select the intersected coordinate that best describes the reported
    # location on the road from the intersection, based on the
    # CrashOffsetPoints function.
    from arcpy import (AddXY_management, AddJoin_management, ListFields, da,
                       SelectLayerByAttribute_management, AddFieldDelimiters)
    GeocodedLayer = 'Geocoding Result: Geocoding_Result_9'
    IntersectLayer = 'Geocoding_Result_9_Buffer_In'
    AddXY_management(IntersectLayer)
    AddJoin_management(IntersectLayer, "ACCIDENT_KEY", GeocodedLayer, "ACCIDENT_KEY")
    FieldsList = ListFields(IntersectLayer)
    CursorFieldList = ['X', 'Y', 'AT_ROAD_KDOT_DIRECTION', 'POINT_X', 'POINT_Y',
                       'OBJECTID', 'ACCIDENT_KEY']
    # cursor to add the Accident IDs and Object IDs to a list
    CoordFinder = da.SearchCursor(IntersectLayer, CursorFieldList)  # @UndefinedVariable
    coordlist = []
    rowDictionary = dict()
    for row in CoordFinder:
        print
        #print('{0}, {1}, {2}, {3}, {4}'.format(row[0], row[1], row[2], row[3], row[4]))
        if str(row[2]) == "E":
            print row[0]
            EastCoord = max(row[0], row[3])
            coordlist.append(EastCoord)
    print coordlist
    FinalEastCoordinate = max(coordlist)
    FinalEastCoordInt = int(FinalEastCoordinate)
    print FinalEastCoordinate
    CoordSelExpression = 'POINT_X -' + str(FinalEastCoordInt) + " < 1"
    SelectLayerByAttribute_management(IntersectLayer, "NEW_SELECTION",
                                      CoordSelExpression)
def checkValuesAgainstDomain(gdb, folder):
    userMessage("Checking field values against approved domains...")
    from arcpy import ListFields
    from arcpy.da import Walk, SearchCursor
    from os import path

    # get list of fields with domains
    fieldsWDoms = fieldsWithDomains()

    for dirpath, dirnames, filenames in Walk(gdb, True, '', False,
                                             ["Table", "FeatureClass"]):
        for filename in filenames:
            fullPath = path.join(gdb, filename)

            # create complete field list
            fields = ListFields(fullPath)
            fieldNames = []
            for field in fields:
                fieldNames.append((field.name).upper())

            # see if fields from complete list have domains
            for fieldN in fieldNames:
                # if field has a domain
                if fieldN in fieldsWDoms:
                    # get the full domain dictionary
                    domainDict = getFieldDomain(fieldN, folder)

                    # put domain values in a list
                    domainList = []
                    for val in domainDict.iterkeys():
                        domainList.append(val.upper())

                    # loop through records for that particular field to see
                    # if all values match the domain
                    wc = fieldN + " is not null"
                    with SearchCursor(fullPath, ("OBJECTID", fieldN), wc) as rows:
                        for row in rows:
                            if row[1] is not None:
                                # see if field domain is actually a range
                                if fieldN == "HNO":
                                    hno = row[1]
                                    if hno > 999999 or hno < 0:
                                        userMessage(filename + ": " + str(row[0]) +
                                                    " value " + str(row[1]) +
                                                    " not in approved domain for field " +
                                                    fieldN)
                                # otherwise, compare row value to domain list
                                else:
                                    if row[1].upper() not in domainList:
                                        userMessage(filename + ": " + str(row[0]) +
                                                    " value " + str(row[1]) +
                                                    " not in approved domain for field " +
                                                    fieldN)

    userMessage("Completed checking fields against domains")
def _fields(self):
    """Determines the field names in the table."""
    try:
        result = [f.name.upper() for f in ListFields(self.full_path)]
    # If a feature is a network dataset or topology, no fields exist
    # and a RuntimeError is raised
    except RuntimeError:
        result = list()
    return result
def extract_attachments(att_table, out_folder, group_by_field=None):
    # [<Field>, ...]
    l_fields = ListFields(att_table)
    # [dbo.schema.fieldname, ...]
    field_names = [f.name for f in l_fields]
    # [DBO.SCHEMA.FIELDNAME, ...]
    uppercase = [f.upper() for f in field_names]
    data_field = [f for f in uppercase if 'DATA' in f.split('.')][0]
    name_field = [f for f in uppercase if 'ATT_NAME' in f.split('.')][0]
    id_field = [f.name for f in l_fields if f.type == 'OID'][0]
    fields = [data_field, name_field, id_field]
    AddMessage(fields)
    if group_by_field:
        if group_by_field not in field_names:
            raise Exception('Field {} not found in fields: {}\n'.format(
                group_by_field, str(field_names)))
        fields.append(group_by_field)
    # verify path
    verify_path_exists(out_folder)
    with SearchCursor(att_table, fields) as cursor:
        for row in cursor:
            full_out_folder = out_folder
            if group_by_field:
                # get the group value and nest the output folder under it
                group_folder = row[fields.index(group_by_field)]
                full_out_folder = join(out_folder, group_folder)
                # double check folder path
                verify_path_exists(full_out_folder)
            # get the attachment file and create a filename
            attachment = row[0]
            filename = 'ATT_{2}_{1}'.format(*row)
            # write the output file
            open(join(full_out_folder, filename), 'wb').write(attachment.tobytes())
    # cleanup
    del row
    del filename
    del attachment
def infos_fields(self, lyr_path, dico_lyr, dico_fields):
    u"""Get the information about field definitions."""
    fields = ListFields(lyr_path.dataSource)
    dico_lyr[u'num_fields'] = len(fields)
    for field in fields:
        if field.name not in [u'FID', u'SHAPE', u'Shape', u'OBJECTID']:
            dico_fields[field.name] = (field.type, field.length,
                                       field.precision, field.aliasName,
                                       field.required)
    # end of function
    return dico_fields
def BuildWhereClauseLike(table, field, value):
    """Constructs a SQL WHERE clause to select rows having the specified
    value within a given field and table."""
    # Add DBMS-specific field delimiters
    fieldDelimited = AddFieldDelimiters(table, field)
    # Determine field type
    fieldType = ListFields(table, field)[0].type
    # Set the value to look for, wildcarded on both sides
    expression = "%" + value + "%"
    # Add single-quotes for string field values
    if str(fieldType) == 'String':
        expression = "'%s'" % expression
    # Format WHERE clause
    whereClauseLike = "%s like %s" % (fieldDelimited, expression)
    return whereClauseLike
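# A hedged usage sketch for BuildWhereClauseLike; the table path, field name,
# and value are placeholders, and SelectLayerByAttribute_management is assumed
# to be imported from arcpy as in the surrounding snippets.
wc = BuildWhereClauseLike(r"C:\data\sample.gdb\routes", "ROUTE_NAME", "23_2017")
SelectLayerByAttribute_management("routesLayer", "NEW_SELECTION", wc)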
def checkRequiredFields(gdb, folder, esb):
    userMessage("Checking that required fields exist...")
    from os import path
    from arcpy import ListFields
    from arcpy.da import Walk
    from time import strftime

    # get today's date
    today = strftime("%m/%d/%y")
    values = []

    # get required fields
    rfDict = getRequiredFields(folder)

    # walk through the tables/feature classes
    for dirpath, dirnames, filenames in Walk(gdb, True, '', False,
                                             ["Table", "FeatureClass"]):
        for filename in filenames:
            fields = []
            fullPath = path.join(gdb, filename)

            # list fields
            fs = ListFields(fullPath)
            for f in fs:
                fields.append(f.name.upper())

            # get the keyword to acquire required field names
            keyword = getKeyword(filename, esb)

            # get the appropriate comparison list
            if keyword in rfDict:
                comparisonList = rfDict[keyword]

                # loop through required fields to make sure they exist
                # in the geodatabase
                for comparisonField in comparisonList:
                    if comparisonField.upper() not in fields:
                        report = (filename + " does not have required field " +
                                  comparisonField)
                        userMessage(report)
                        # add issue to list of values
                        val = (today, report, "Field")
                        values.append(val)

    # record issues if any exist
    if values != []:
        RecordResults("template", values, gdb)

    userMessage("Completed check for required fields")
def createAndUpdateAccTable(sourceTableGDB, sourceTableName,
                            destinationTableGDB, destinationTableName):
    sourceTableFullPath = os.path.join(sourceTableGDB, sourceTableName)
    destinationTableFullPath = os.path.join(destinationTableGDB,
                                            destinationTableName)
    print "Starting the AccTable transfer."
    if not Exists(destinationTableFullPath):
        env.workspace = sourceTableGDB
        MakeTableView_management(sourceTableFullPath, "sourceView")
        # Uses the sourceView table view as a template for the table creation
        # to carry over the field information.
        CreateTable_management(destinationTableGDB, destinationTableName,
                               "sourceView")
    env.workspace = destinationTableGDB

    fieldObjectList = ListFields(sourceTableFullPath)
    fieldList = [field.name for field in fieldObjectList
                 if field.name != "OBJECTID"]
    tableDataList = list()

    # Use a SearchCursor to read the data in from the sourceTable.
    sCursor = SearchCursor(sourceTableFullPath, fieldList)
    for cursorItem in sCursor:
        tableDataList.append(cursorItem)
    try:
        del sCursor
    except:
        pass

    # Use an InsertCursor to insert the data into the destinationTable.
    iCursor = InsertCursor(destinationTableFullPath, fieldList)
    for tableDataItem in tableDataList:
        returnedOID = iCursor.insertRow(tableDataItem)
        print "Inserted a row with an objectID of: " + str(returnedOID)
    try:
        del iCursor
    except:
        pass
def checker(self):
    # type: () -> bool
    try:
        env.workspace = self.in_db
        for dataset in ListDatasets():
            for fc in ListFeatureClasses(feature_dataset=dataset):
                self.__fc = fc
                self.__fc_fields = ListFields(self.__fc)
                for installation in self.__get_installations(fc):
                    if installation:
                        self.__installation_field_check(installation)
    except Exception as e:
        self.log.exception(e.message)
        raise Exit()
    else:
        self.__write_result_to_table(self.__attributes)
        return True
def add_name_field():
    """Only a field called 'name' will be retained when locations are loaded
    into a service area analysis, as the MAX stops will be. This function
    populates that field with unique identifiers so that the other attributes
    from this data can be linked to the network analyst output."""
    fields = [f.name for f in ListFields(MAX_STOPS)]
    if UNIQUE_FIELD not in fields:
        f_type = 'LONG'
        AddField(MAX_STOPS, UNIQUE_FIELD, f_type)
    u_fields = [ID_FIELD, UNIQUE_FIELD]
    with UpdateCursor(MAX_STOPS, u_fields) as cursor:
        for stop_id, name in cursor:
            name = stop_id
            cursor.updateRow((stop_id, name))
MakeTableView_management(r"Database Connections\ATLASPROD.odc\NM3.NM_INV_ITEMS",
                         "Items")
MakeTableView_management(r"Database Connections\ATLASPROD.odc\NM3.NM_MEMBERS",
                         "Members")
MakeTableView_management(r"Database Connections\ATLASPROD.odc\NM3.NM_ELEMENTS",
                         "Elements")

lyrlist = linelyrlist
print lyrlist
for lyr in lyrlist:
    lyrname = str(lyr) + "_C"
    addlyr = ws + "\\" + tempmdb + "\\" + lyr
    MakeFeatureLayer_management(addlyr, lyrname)
    lyr = lyr + "_C"
    IDField = ListFields(lyr, "*NE_ID*", "Integer")
    for field in IDField:
        PKfield = "{}".format(field.name)
    AddJoin_management(lyr, PKfield, "Members", "NM_NE_ID_IN", "KEEP_COMMON")
    AddJoin_management(lyr, "NM_NE_ID_OF", "Elements", "NE_ID", "KEEP_COMMON")
    print str(lyr) + " elements table joined"

for lyr in linelyrlist:
    print lyr + "_C is the layer I'm linear referencing now..."
    outlyr = ws + "\\" + tempgdb + "\\" + str(lyr) + "_ln_1"
    lyr = lyr + "_C"
    # The original call was truncated here; the remaining optional arguments
    # are left at their defaults.
    LocateFeaturesAlongRoutes_lr(lyr, "CMLRS", "LRS_KEY", '0.5 feet', outlyr,
                                 "LRS_KEY LINE Beg_Cnty_Logmile End_Cnty_Logmile",
                                 "FIRST", "DISTANCE")
def field_exists(in_fc, in_field):
    from arcpy import ListFields
    return in_field in [f.name for f in ListFields(in_fc)]
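# A usage sketch for field_exists, guarding an AddField call; the feature
# class path and field name are placeholders, and AddField_management is
# assumed to be imported from arcpy as in the surrounding snippets.
fc = r"C:\data\sample.gdb\roads"  # hypothetical feature class
if not field_exists(fc, "ROUTE_ID"):
    AddField_management(fc, "ROUTE_ID", "TEXT", "", "", 50)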
def labelAngleNormalization(quarterOrHalf):
    if quarterOrHalf.lower() == "quarter":
        countyBorderFeature = countyBorderFeature_Q
        countyRoadNameRosette = countyRoadNameRosette_Q
    elif quarterOrHalf.lower() == "half":
        countyBorderFeature = countyBorderFeature_H
        countyRoadNameRosette = countyRoadNameRosette_H
    else:
        print "quarterOrHalf variable not correctly defined."
        raise Exception("quarterOrHalf error.")

    print "Normalizing the label angle values."

    # ListFields returns Field objects, so compare against the field names.
    if "COUNTY_NAME" not in [f.name for f in ListFields(countyRoadNameRosette)]:
        AddField_management(countyRoadNameRosette, "COUNTY_NAME", "TEXT",
                            "", "", "55")

    newCursor = daSearchCursor(countyBorderFeature, countyBorderFields)
    countyTranslationDictionary = dict()

    # countyBorderItem[3] is the number, countyBorderItem[2] is the name.
    # -- Use for translating from county number to county name.
    for countyBorderItem in newCursor:
        if countyBorderItem[3] not in countyTranslationDictionary:
            countyTranslationDictionary[countyBorderItem[3]] = countyBorderItem[2]

    if "newCursor" in locals():
        del newCursor

    newCursor = daUpdateCursor(countyRoadNameRosette,
                               countyRoadNameRosetteFields)

    for countyPointItem in newCursor:
        countyPointItem = list(countyPointItem)
        # Takes the remainder of the angle divided by 360.
        # Uses fmod due to floating point issues with the normal modulo
        # operator in python.
        angle = math.fmod(countyPointItem[0], 360)

        # Snap angles within 20 degrees of a cardinal direction to that
        # direction; delete rows that are further away than that.
        if 250 <= angle <= 290:
            angle = 270
        elif 160 <= angle <= 200:
            angle = 180
        elif 70 <= angle <= 110:
            angle = 90
        elif (0 <= angle <= 20) or (340 <= angle <= 360):
            angle = 0
        else:
            print "Deleting a row for having an angle more than 20 degrees away from a cardinal direction."
            newCursor.deleteRow()
            continue

        countyPointItem[0] = angle
        # countyPointItem[1] is County Name, countyPointItem[2] is County Number.
        if countyPointItem[2] in countyTranslationDictionary:
            countyPointItem[1] = countyTranslationDictionary[countyPointItem[2]]
        else:
            countyPointItem[1] = ""
        newCursor.updateRow(countyPointItem)

    if "newCursor" in locals():
        del newCursor

    print "Label angle normalization complete!"
    print "Done extending and intersecting road features."
    # Need to break this into two pieces and pass some of the in-memory layers
def ReportResolutionOrdering():
    # Create a list to hold the rows from the cursor.
    holderList = list()

    # Connection to the feature class
    fc = connection1 + "CCL_Report"
    testForField = "RESOLUTION_ORDER"
    fieldSeen = 0

    # Look for the RESOLUTION_ORDER field in the table.
    fieldCheckList = ListFields(fc)
    for fieldCheckElement in fieldCheckList:
        if str.upper(str(fieldCheckElement.name)) == str.upper(testForField):
            fieldSeen += 1

    # If the RESOLUTION_ORDER field does not yet exist, add it.
    if fieldSeen == 0:
        print "Adding the Resolution_Order field to the CCL_Report table."
        AddField_management(connection1 + "CCL_Report", "RESOLUTION_ORDER",
                            "SHORT")
        print "Populating Resolution_Order with new values."
    else:
        print "The Resolution_Order field already exists within the CCL_Report table."
        print "Updating the Resolution_Order values."

    # Start the cursor to retrieve the rows from the feature class.
    fieldList = ['OBJECTID', 'CCL_LRS', 'CCL_BEGIN', 'DESCRIPTION', 'CITY',
                 'RESOLUTION_ORDER']

    # Cursor to read all the fields and place them in an array.
    cursor = da.SearchCursor(fc, fieldList)  # @UndefinedVariable
    for row in cursor:
        listRow = list(row)
        holderList.append(listRow)

    if cursor:
        del cursor
    if row:
        del row

    # Create a dictionary to store the rows by City.
    rowListDictionary = {}

    # Loop through the list to build a dictionary with CCL_Routes as keys.
    for heldRow in holderList:
        # Each key will hold a list of lists.
        rowListContainer = list()
        # If the key already exists, assign the previous list of lists
        # to the list container, then append the new list before updating
        # the key in the dictionary.
        if heldRow[1] in rowListDictionary:
            rowListContainer = rowListDictionary[heldRow[1]]
            rowListContainer.append(heldRow)
            rowListDictionary[heldRow[1]] = rowListContainer
        # Otherwise, the key needs to be created with the list container
        # having only one list contained within it for now.
        else:
            rowListContainer.append(heldRow)
            rowListDictionary[heldRow[1]] = rowListContainer

    for cclKey in rowListDictionary:
        outListContainer = rowListDictionary[cclKey]
        # Sort based on CCL_Begin.
        outListContainer.sort(key=lambda sortingRow: sortingRow[2])
        countVariable = 0
        descVariable = ''
        for outListIndex, outList in enumerate(outListContainer):
            # Is this the first list/row in the key's list container?
            # If so, then set the Resolution_Order to 0.
            if outListIndex == 0:
                outList[5] = 0
                descVariable = outList[3]
            else:
                currentDesc = outList[3]
                # Check to see if the previous description is the same
                # as the current description.
                if currentDesc == descVariable:
                    # If so, set the Resolution_Order to the current
                    # countVariable and do not increment it.
                    outList[5] = countVariable
                else:
                    # The current desc is different than the previous desc,
                    # so update the count variable prior to assignment.
                    countVariable += 1
                    outList[5] = countVariable
                    descVariable = outList[3]
            outListContainer[outListIndex] = outList
        rowListDictionary[cclKey] = outListContainer

    # An update cursor follows that writes the RESOLUTION_ORDER values from
    # rowListDictionary back to the table, matched on the shared OBJECTID field.
    fieldList = ['OBJECTID', 'CCL_LRS', 'RESOLUTION_ORDER']
    cursor = da.UpdateCursor(fc, fieldList)  # @UndefinedVariable
    for row in cursor:
        cclKey = row[1]
        outListContainer = rowListDictionary[cclKey]
        for outList in outListContainer:
            # If the ObjectID for the list in the list container for the
            # matching CCL_LRS equals the OBJECTID field in the cursor row,
            # update the cursor row's RESOLUTION_ORDER field to be the same
            # as that list's resolution order value.
            if row[0] == outList[0]:
                row[2] = outList[5]
        cursor.updateRow(row)

    if cursor:
        del cursor
    if row:
        del row
def createShortGradiculeLinesForEachCounty():
    # Get/use the same projection as the one used for the county roads.
    spatialReferenceProjection = Describe(sharedNonStateSystem).spatialReference
    env.workspace = sqlGdbLocation

    inputCountyGradicule = countyCountyGradicule
    bufferedCounties = 'bufferedCounties'
    countiesToCopy = 'countiesToCopy'
    gradiculeToCopy = 'gradiculeToCopy'
    loadedGradiculeCopy = 'loadedGradiculeCopy'
    loadedTempGradicule = 'loadedTempGradicule'
    #unBufferedCounties = 'unBufferedCounties'
    # Using the miniBuffered process changes it from 1457 total output
    # features to 1481 (at 2.1k) total output features.
    miniBufferedCounties = 'miniBufferedCounties'
    loadedOutputGradicule = 'loadedOutputGradicule'
    tempCounties = r'in_memory\tempCounties'
    tempCountyGradicule = r'in_memory\tempCountyGradicule'
    tempCountyGradiculePostErase = r'in_memory\tempCountyGradiculePostErase'
    tempCountyGradiculeSinglePart = r'in_memory\tempCountyGradiculeSinglePart'
    bufferCursorFields = ["OBJECTID", "COUNTY_NAME"]

    MakeFeatureLayer_management(sharedCounties, countiesToCopy)
    MakeFeatureLayer_management(countyCountyGradicule, gradiculeToCopy)
    CopyFeatures_management(gradiculeToCopy, countyGradiculeCopied)
    MakeFeatureLayer_management(countyGradiculeCopied, loadedGradiculeCopy)

    # Might be worth dissolving based on COORD & County_Name prior to removing
    # the County_Name field, if that's a possibility. Or better yet, just make
    # it so that the gradicule lines for a particular county are eligible for
    # intersecting and erasing with that same county's polygons. All we're
    # trying to do here is make it so that the county's original gradicule
    # lines are about half of their original size. Don't need to find out
    # which gradicule lines are close to the county or anything else like
    # that. Just need to reduce the size of the lines and keep the parts that
    # are nearest the county that they go with.
    #
    # Remove the County_Name field so that the intersect can add it back and
    # populate it only where the county buffer actually intersects the lines.
    #DeleteField_management(countyGradiculeCopied, "County_Name")

    # Elaine requested that this be 1000 feet shorter.
    # I made it 2000 feet shorter, because it still seemed too big.
    Buffer_analysis(sharedCounties, countiesBuffered, "8000 Feet")
    Buffer_analysis(sharedCounties, countiesMiniBuffered, "1500 Feet")

    bufferedCountyPolygonList = list()
    outputFeatureList = list()

    # 1st SearchCursor
    newCursor = daSearchCursor(countiesBuffered, bufferCursorFields)
    for newRow in newCursor:
        bufferedCountyPolygonList.append(list(newRow))
    if 'newCursor' in locals():
        del newCursor

    MakeFeatureLayer_management(countiesBuffered, bufferedCounties)
    MakeFeatureLayer_management(countiesMiniBuffered, miniBufferedCounties)

    loadedCountiesFields = ListFields(bufferedCounties)
    for loadedCountiesField in loadedCountiesFields:
        print "A loadedCountiesField was found: " + str(loadedCountiesField.name)

    countyGradiculeFields = ListFields(loadedGradiculeCopy)
    for countyGradiculeField in countyGradiculeFields:
        print "A countyGradiculeField was found: " + str(countyGradiculeField.name)

    for listedRow in bufferedCountyPolygonList:
        print str(listedRow)
        selectCounty = listedRow[1]
        whereClause = """ "COUNTY_NAME" = '""" + str(selectCounty) + """' """
        print "The whereClause is " + str(whereClause)
        SelectLayerByAttribute_management(bufferedCounties, "NEW_SELECTION",
                                          whereClause)
        SelectLayerByAttribute_management(loadedGradiculeCopy, "NEW_SELECTION",
                                          whereClause)
        Intersect_analysis([loadedGradiculeCopy, bufferedCounties],
                           tempCountyGradicule, "ALL")
        MultipartToSinglepart_management(tempCountyGradicule,
                                         tempCountyGradiculeSinglePart)

        # Selects the same county as the other Select, but does it from the
        # miniBufferedCounties so that the lines which lay inside of the
        # county, running just along its edges, are erased, as they should
        # only exist as gradicules for the counties adjoining this one, but
        # not for this one itself.
        SelectLayerByAttribute_management(miniBufferedCounties, "NEW_SELECTION",
                                          whereClause)
        MakeFeatureLayer_management(tempCountyGradiculeSinglePart,
                                    loadedTempGradicule)
        SelectLayerByAttribute_management(loadedTempGradicule, "NEW_SELECTION",
                                          whereClause)
        secVerGradiculeFields = ListFields(loadedTempGradicule)
        #for secVerGradiculeField in secVerGradiculeFields:
        #    print "A secVerGradiculeField was found: " + str(secVerGradiculeField.name)
        Erase_analysis(loadedTempGradicule, miniBufferedCounties,
                       tempCountyGradiculePostErase, xyToleranceVal)

        fieldsToCopy = ["SHAPE@", "County_Number", "County_Name", "DIRECTION",
                        "COORD"]

        # 2nd SearchCursor
        newCursor = daSearchCursor(tempCountyGradiculePostErase, fieldsToCopy)
        for newRow in newCursor:
            outputFeatureList.append(newRow)
        if 'newCursor' in locals():
            del newCursor

    try:
        Delete_management(countyGradiculeShortWithUser)
    except:
        pass

    CreateFeatureclass_management(sqlGdbLocation, countyGradiculeShortNoPath,
                                  "POLYLINE", "", "", "",
                                  spatialReferenceProjection)
    AddField_management(countyGradiculeShortNoPath, "County_Number", "DOUBLE",
                        "", "", "")
    AddField_management(countyGradiculeShortNoPath, "County_Name", "TEXT",
                        "", "", "55")
    AddField_management(countyGradiculeShortNoPath, "DIRECTION", "TEXT",
                        "", "", "5")
    AddField_management(countyGradiculeShortNoPath, "COORD", "TEXT",
                        "", "", "30")

    print "First Intersected County Gradicule Row: " + str(outputFeatureList[0])

    newCursor = daInsertCursor(countyGradiculeShortPath, fieldsToCopy)
    counter = 1
    for outputFeature in outputFeatureList:
        insertedOID = newCursor.insertRow(outputFeature)
        counter += 1
        print "Inserted Row with Object ID of " + str(insertedOID)

    # Load the feature class. Remove anything shorter than 850 feet.
    MakeFeatureLayer_management(countyGradiculeShortPath, loadedOutputGradicule)

    # Select the rows that have geometry which is shorter than 850 feet.
    ## Note that Shape.STLength() returns units in the projection or
    ## coordinate system that the feature class is stored in.
    whereClause = """ Shape.STLength() < 850 """
    print "The whereClause is " + str(whereClause)
    SelectLayerByAttribute_management(loadedOutputGradicule, "NEW_SELECTION",
                                      whereClause)

    # If there is at least one row selected, delete each selected row.
    if int(GetCount_management(loadedOutputGradicule).getOutput(0)) > 0:
        print str(GetCount_management(loadedOutputGradicule).getOutput(0)) + " rows selected."
        DeleteRows_management(loadedOutputGradicule)
    else:
        print "No rows were selected to delete."

    if 'newCursor' in locals():
        del newCursor
def createCountyLinesForEachCounty():
    env.workspace = sqlGdbLocation

    inputCountyLines = sharedCountyLines
    inputCountyPolygons = sharedCounties
    dissolvedCountyLines = countyLinesDissolved
    loadedCounties = 'loadedCounties'
    tempCountyLines = r'in_memory\tempCountyLines'
    outputCountyLines = countyLinesIntersectedNoPath
    bufferCursorFields = ["OBJECTID"]

    # Dissolve all of those county lines into one set of lines, then create
    # 105 features that are intersected with the polygons from said line
    # dissolve.
    Dissolve_management(inputCountyLines, dissolvedCountyLines)
    Buffer_analysis(inputCountyPolygons, countiesBuffered, "15500 Feet")

    bufferedCountyPolygonList = list()
    outputFeatureList = list()

    # 1st SearchCursor
    newCursor = daSearchCursor(countiesBuffered, bufferCursorFields)
    for newRow in newCursor:
        bufferedCountyPolygonList.append(list(newRow))
    if 'newCursor' in locals():
        del newCursor

    MakeFeatureLayer_management(countiesBuffered, loadedCounties)

    loadedCountiesFields = ListFields(loadedCounties)
    for loadedCountiesField in loadedCountiesFields:
        print "A loadedCountiesField was found: " + str(loadedCountiesField.name)

    for listedRow in bufferedCountyPolygonList:
        selectNumber = listedRow[0]
        whereClause = """ "OBJECTID" = """ + str(selectNumber)
        print "The whereClause = " + str(whereClause)
        SelectLayerByAttribute_management(loadedCounties, "NEW_SELECTION",
                                          whereClause)
        Intersect_analysis([dissolvedCountyLines, loadedCounties],
                           tempCountyLines, "ALL")

        # 2nd SearchCursor
        newCursor = daSearchCursor(tempCountyLines,
                                   ["SHAPE@", "County_Number", "County_Name"])
        for newRow in newCursor:
            outputFeatureList.append(newRow)
        if 'newCursor' in locals():
            del newCursor

    try:
        Delete_management(countyLinesIntersectedWithUser)
    except:
        pass

    CreateFeatureclass_management(sqlGdbLocation, outputCountyLines, "POLYLINE",
                                  "", "", "", spatialReferenceProjection)
    AddField_management(outputCountyLines, "County_Number", "DOUBLE", "", "", "")
    AddField_management(outputCountyLines, "County_Name", "TEXT", "", "", "55")

    print "First Intersected County Row: " + str(outputFeatureList[0])

    countyLinesIntersectFields = ["SHAPE@", "County_Number", "County_Name"]
    newCursor = daInsertCursor(countyLinesIntersectedPath,
                               countyLinesIntersectFields)
    counter = 1
    for outputFeature in outputFeatureList:
        insertedOID = newCursor.insertRow(outputFeature)
        counter += 1
        print "Inserted Row with Object ID of " + str(insertedOID)

    if 'newCursor' in locals():
        del newCursor
def fast_join(fc_target, fc_target_keyfield, fc_join, fc_join_keyfield,
              fields_to_join):
    start_time = perf_counter()

    # make field dict for join fc fields {fname: [dtype, len]}
    join_fields = ListFields(fc_join)
    jfields_names = [f.name for f in join_fields]
    jfields_dtypes = [f.type for f in join_fields]
    jfields_len = [f.length for f in join_fields]
    dts_lens = [[dtype, flen] for dtype, flen in zip(jfields_dtypes, jfields_len)]
    jfields_dict = dict(zip(jfields_names, dts_lens))

    # field names in the target fc
    target_start_fields = [f.name for f in ListFields(fc_target)]

    # as needed, add field(s) to target FC if they don't already exist.
    print(f"Adding fields {fields_to_join} to target table {fc_target}...")
    for jfield in fields_to_join:
        if jfield not in target_start_fields:
            ftype = jfields_dict[jfield][0]
            flen = jfields_dict[jfield][1]
            management.AddField(in_table=fc_target, field_name=jfield,
                                field_type=ftype, field_length=flen)
        else:
            print(f"\t{jfield} already in {fc_target}'s fields. "
                  "Will be OVERWRITTEN with joined data...")

    # read the join table using its own key field
    jcur_fields = [fc_join_keyfield] + fields_to_join
    join_dict = {}
    print("reading data from join table...")
    with SearchCursor(fc_join, jcur_fields) as scur:
        for row in scur:
            jkey = row[jcur_fields.index(fc_join_keyfield)]
            vals_to_join = [row[jcur_fields.index(fname)]
                            for fname in fields_to_join]
            join_dict[jkey] = vals_to_join

    # write to the target table using the target's key field
    print("writing join data to target table...")
    tcur_fields = [fc_target_keyfield] + fields_to_join
    with UpdateCursor(fc_target, tcur_fields) as ucur:
        for row in ucur:
            jkey = row[tcur_fields.index(fc_target_keyfield)]
            # if a join id value is in the target table but not the join table,
            # skip the join. The values in the resulting joined column will be
            # null for these cases.
            if join_dict.get(jkey):
                vals_to_join = join_dict[jkey]
            else:
                continue
            row = [jkey] + vals_to_join
            ucur.updateRow(row)

    elapsed_sec = round(perf_counter() - start_time, 1)
    print(f"Successfully joined fields {fields_to_join} from {fc_join} onto "
          f"{fc_target} in {elapsed_sec} seconds!")
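# A hedged usage sketch for fast_join; the feature class paths and field names
# below are placeholders. Assumes the same imports the function relies on
# (perf_counter from time, ListFields and management from arcpy, and the
# arcpy.da SearchCursor/UpdateCursor) are in scope.
fast_join(fc_target=r"C:\data\sample.gdb\parcels",
          fc_target_keyfield="PARCEL_ID",
          fc_join=r"C:\data\sample.gdb\assessments",
          fc_join_keyfield="PARCEL_ID",
          fields_to_join=["LAND_VALUE", "IMPR_VALUE"])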
def get_USGS_metadata(usgs_fc):
    """
    Access the USGS site information REST API to get the basin area for all
    applicable sites. Adds the basin area field to the FC and writes the data
    returned from the REST service.

    Required:
        usgs_fc -- the feature class of records from the AWDB

    Returns:
        None
    """
    import urllib.request
    import urllib.parse
    import gzip
    import io
    from re import search
    from arcpy import ListFields, AddField_management
    from arcpy.da import SearchCursor, UpdateCursor

    # check for area field and add if missing
    fields = ListFields(usgs_fc)
    for fieldname, datatype in NEW_FIELDS:
        for field in fields:
            if field.name == fieldname:
                break
        else:
            AddField_management(usgs_fc, fieldname, datatype)

    # get a list of station IDs in the FC
    stationIDs = []
    with SearchCursor(usgs_fc, STATION_ID_FIELD) as cursor:
        for row in cursor:
            sid = row[0].split(":")[0]
            # valid USGS station IDs are between 8 and 15 chars and are numerical
            if len(sid) >= 8 and not search('[a-zA-Z]', sid):
                stationIDs.append(sid)

    # set up and send the HTTP request
    request = urllib.request.Request(
        settings.USGS_URL,
        urllib.parse.urlencode({
            "format": "rdb",                # get the data in USGS rdb format
            "sites": ",".join(stationIDs),  # the site IDs to get, comma-separated
            "siteOutput": "expanded"        # expanded output includes basin area
            #"modifiedSince": "P" + str(SCRIPT_FREQUENCY) + "D"  # only get records modified since last run
        }).encode('utf-8')
    )
    # allow gzipped response
    request.add_header('Accept-encoding', 'gzip')
    response = urllib.request.urlopen(request)

    # check to see if response is gzipped and decompress if yes
    if response.info().get('Content-Encoding') == 'gzip':
        buf = io.BytesIO(response.read())
        data = gzip.GzipFile(fileobj=buf)
    else:
        data = response

    # parse the response and create a dictionary keyed on the station ID
    stationAreas = {}
    for line in data.readlines():
        line = line.decode('utf-8')
        if line.startswith('USGS'):
            # data elements in the line (station record) are separated by tabs
            line = line.split('\t')
            # The 2nd element is the station ID, 3rd is the name, and the 30th
            # is the area. Order in the tuple is important, so data is entered
            # into the correct fields in the table.
            stationAreas[line[1]] = (line[29], line[1], line[2])

    # write the response data to the FC
    fieldsToAccess = [STATION_ID_FIELD] + [name for name, datatype in NEW_FIELDS]
    with UpdateCursor(usgs_fc, fieldsToAccess) as cursor:
        for row in cursor:
            stationid = row[0].split(":")[0]
            try:
                # row[1] is area
                row[1] = float(stationAreas[stationid][0])
            except KeyError:
                # in case no record was returned for ID, skip to next record
                continue
            except ValueError:
                # in case area returned is ""
                pass
            try:
                # row[2] is the USGS station ID
                row[2] = stationAreas[stationid][1]
            except ValueError:
                # in case ID returned is ""
                pass
            try:
                # row[3] is the USGS station name
                row[3] = stationAreas[stationid][2]
            except ValueError:
                # in case name returned is ""
                pass
            # no exception so data valid, update row
            cursor.updateRow(row)
def removeSmallRoads():
    # Going to have to build a list of IDs for roads with a shape length less
    # than or equal to 1500, since we don't have the SQL information to do a
    # selection based on a clause.
    # Could also add a field and then calculate the length into it prior to
    # running this selection.
    # Need a search cursor that gets the ID and shape length for each road.
    # Then, add the ID for roads with length less than or equal to 1500 to a
    # list and build SQL queries dynamically to select and add features from
    # that list, until the list is exhausted and all features have been
    # selected.
    print "Removing the small roads from the data."
    #CopyFeatures_management(countyRoadsFeature, countyRoadsFeaturePrereduction_Q)

    inMemoryRoadsLayer = 'inMemoryRoadsLayerFC'
    MakeFeatureLayer_management(countyRoadsFeature, inMemoryRoadsLayer)

    inMemRoadsFields = ListFields(inMemoryRoadsLayer)
    for inMemRoadField in inMemRoadsFields:
        print str(inMemRoadField.name)

    smallRoadsSCFields = ['ID2', 'Shape@Length']
    smallRoadsSearchCursor = daSearchCursor(inMemoryRoadsLayer, smallRoadsSCFields)
    roadIDsToRemove = list()

    for smallRoadRow in smallRoadsSearchCursor:
        if smallRoadRow[1] <= 1500:
            roadIDsToRemove.append(smallRoadRow[0])

    # Select the IDs in chunks of no more than 999 to keep each IN clause
    # within SQL expression limits.
    roadRemovalCounter = 0
    roadsReductionWhereClause = """ "ID2" IN ("""
    for roadID in roadIDsToRemove:
        if roadRemovalCounter <= 998:
            roadsReductionWhereClause = roadsReductionWhereClause + str(roadID) + """, """
            roadRemovalCounter += 1
        else:
            # Remove the trailing ", " and add a closing parenthesis.
            roadsReductionWhereClause = roadsReductionWhereClause[:-2] + """) """
            SelectLayerByAttribute_management(inMemoryRoadsLayer,
                                              "ADD_TO_SELECTION",
                                              roadsReductionWhereClause)
            # Debug only
            print "Selecting..."
            selectedRoadsCount = int(GetCount_management(inMemoryRoadsLayer).getOutput(0))
            print "Number of roads selected: " + str(selectedRoadsCount)
            # Start a new clause with the current roadID.
            roadsReductionWhereClause = """ "ID2" IN (""" + str(roadID) + """, """
            roadRemovalCounter = 1

    # Select whatever remains in the final, partially built clause.
    # Remove the trailing ", " and add a closing parenthesis.
    roadsReductionWhereClause = roadsReductionWhereClause[:-2] + """) """
    SelectLayerByAttribute_management(inMemoryRoadsLayer, "ADD_TO_SELECTION",
                                      roadsReductionWhereClause)
    # Debug only
    print "Selecting..."
    selectedRoadsCount = int(GetCount_management(inMemoryRoadsLayer).getOutput(0))
    print "Number of roads selected: " + str(selectedRoadsCount)

    if selectedRoadsCount >= 1:
        DeleteFeatures_management(inMemoryRoadsLayer)