def sbdd_UniqueISP():
    theFCs = ["CensusBlock", "Address", "RoadSegment", "Wireless", "Overview"]
    for theFC in theFCs:
        if arcpy.Exists(theFGDB + "/" + theFC + "_FRQ"):
            arcpy.Delete_management(theFGDB + "/" + theFC + "_FRQ")
        arcpy.Frequency_analysis("BB_Service_" + theFC,
                                 theFGDB + "/" + theFC + "_FRQ",
                                 ["FRN", "DBANAME", "PROVNAME"])
    arcpy.Append_management([theFGDB + "/Address_FRQ", theFGDB + "/RoadSegment_FRQ",
                             theFGDB + "/Wireless_FRQ", theFGDB + "/Overview_FRQ"],
                            theFGDB + "/CensusBlock_FRQ", "TEST", "", "")
    if arcpy.Exists(theFGDB + "/ISP_FRQ"):
        arcpy.Delete_management(theFGDB + "/ISP_FRQ")
    arcpy.Frequency_analysis(theFGDB + "/CensusBlock_FRQ", theFGDB + "/ISP_FRQ",
                             ["FRN", "DBANAME", "PROVNAME"])
    myCnt = int(arcpy.GetCount_management(theFGDB + "/ISP_FRQ").getOutput(0))
    myFile.write(",,Number of ISPs Provided in Submission " + "," + str(myCnt) + "\n")
    theFCs = ["CensusBlock", "Address", "RoadSegment", "Wireless", "Overview", "ISP"]
    for theFC in theFCs:
        if arcpy.Exists(theFGDB + "/" + theFC + "_FRQ"):
            arcpy.Delete_management(theFGDB + "/" + theFC + "_FRQ")
    del myCnt, theFC, theFCs
    return ()

def lightningDay(clip_feature, grid_feature, cell_size):
    # if arcpy.Exists("lightningDay.tif"):
    #     return
    # Build circular buffers around the grid-cell centroids
    point_feature = ''.join(["point", str(cell_size)])
    arcpy.FeatureToPoint_management(grid_feature, point_feature)
    buffer_feature = ''.join(["buffer", str(cell_size)])
    buffer_distance = ''.join([str(cell_size), " ", "Kilometers"])
    arcpy.Buffer_analysis(point_feature, buffer_feature, buffer_distance)
    intersect_feature = ''.join(["intersect", str(cell_size)])
    arcpy.Intersect_analysis([clip_feature, buffer_feature], intersect_feature)
    field_frequency = ''.join(["FID_buffer", str(cell_size)])
    frequency_table1 = ''.join(["intersect", str(cell_size), "_frequency1"])
    arcpy.Frequency_analysis(intersect_feature, frequency_table1,
                             ["Date", field_frequency])
    frequency_table2 = ''.join(["intersect", str(cell_size), "_frequency2"])
    arcpy.Frequency_analysis(frequency_table1, frequency_table2, field_frequency)
    layer = ''.join(["point", str(cell_size)])
    arcpy.MakeFeatureLayer_management(point_feature, layer)
    field_join = ''.join(["FID_buffer", str(cell_size)])
    arcpy.AddJoin_management(layer, "ORIG_FID", frequency_table2, field_join)
    day_points = "dayPoints"
    arcpy.FeatureToPoint_management(layer, day_points)
    # Spline comes from arcpy.sa (requires the Spatial Analyst extension)
    lightning_day_raster = Spline(day_points, "FREQUENCY")
    lightning_day_raster.save("lightningDay")

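# A minimal usage sketch for lightningDay(), assuming a workspace that holds a
# lightning-strike layer ("strikes") clipped to the study area and a fishnet
# grid ("grid10"); the names and path are hypothetical. Spline lives in
# arcpy.sa and needs the Spatial Analyst extension checked out.
import arcpy
from arcpy.sa import Spline

arcpy.env.workspace = r"C:\data\lightning.gdb"   # assumed workspace
arcpy.CheckOutExtension("Spatial")
lightningDay("strikes", "grid10", 10)            # 10 km buffer radius per cell point
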
def sbdd_ProviderReport():
    myFile.write(",Distinct Providers \n")
    theFCs = ["CensusBlock", "Address", "RoadSegment", "Wireless", "Prov"]
    for theFC in theFCs:
        if arcpy.Exists(theFGDB + "/" + theFC + "_FRQ"):
            arcpy.Delete_management(theFGDB + "/" + theFC + "_FRQ")
    theFCs = ["CensusBlock", "Address", "RoadSegment", "Wireless"]
    for theFC in theFCs:
        arcpy.Frequency_analysis("BB_Service_" + theFC,
                                 theFGDB + "/" + theFC + "_FRQ",
                                 ["FRN", "DBANAME", "PROVNAME"])
    arcpy.Append_management([theFGDB + "/Address_FRQ", theFGDB + "/RoadSegment_FRQ",
                             theFGDB + "/Wireless_FRQ"],
                            theFGDB + "/CensusBlock_FRQ", "TEST", "", "")
    arcpy.Frequency_analysis(theFGDB + "/CensusBlock_FRQ", theFGDB + "/Prov_FRQ",
                             ["FRN", "DBANAME", "PROVNAME"])
    # go open up and read this table
    myFile.write("," + "FRN,DBANAME,PROVNAME" + "\n")
    for row in arcpy.SearchCursor(theFGDB + "/Prov_FRQ"):
        theFRN = str(row.getValue("FRN"))
        theDBA = row.getValue("DBANAME").encode('utf-8').strip()
        theDBA = theDBA.replace(",", " ")
        theDBA = theDBA.replace("'", " ")
        theProv = row.getValue("PROVNAME").encode('utf-8').strip()
        theProv = theProv.replace(",", " ")
        theProv = theProv.replace("'", " ")
        myFile.write("," + theFRN + "," + theDBA + "," + theProv + "\n")
    del theFRN, theDBA, theProv, row
    theFCs = ["CensusBlock", "Address", "RoadSegment", "Wireless"]  # "Prov"
    for theFC in theFCs:
        if arcpy.Exists(theFGDB + "/" + theFC + "_FRQ"):
            arcpy.Delete_management(theFGDB + "/" + theFC + "_FRQ")
    del theFC, theFCs

def sbdd_SpeedReport():
    myFile.write(",Distinct Speed Tiers Provided \n")
    theFCs = ["TechSpeed_MA_FRQ", "TechSpeed_TY_FRQ"]
    for theFC in theFCs:
        if arcpy.Exists(theFGDB + "/" + theFC):
            arcpy.Delete_management(theFGDB + "/" + theFC)
    theFCs = ["CensusBlock", "Address", "RoadSegment", "Wireless"]
    for theFC in theFCs:
        if arcpy.Exists(theFGDB + "/" + theFC + "_FRQ"):
            arcpy.Delete_management(theFGDB + "/" + theFC + "_FRQ")
        arcpy.Frequency_analysis("BB_Service_" + theFC,
                                 theFGDB + "/" + theFC + "_FRQ",
                                 ["TRANSTECH", "MAXADDOWN", "MAXADUP", "TYPICDOWN", "TYPICUP"])
    arcpy.Append_management([theFGDB + "/Address_FRQ", theFGDB + "/RoadSegment_FRQ",
                             theFGDB + "/Wireless_FRQ"],
                            theFGDB + "/CensusBlock_FRQ", "TEST", "", "")
    arcpy.Frequency_analysis(theFGDB + "/CensusBlock_FRQ", theFGDB + "/TechSpeed_MA_FRQ",
                             ["TRANSTECH", "MAXADDOWN", "MAXADUP"], ["FREQUENCY"])
    arcpy.Frequency_analysis(theFGDB + "/CensusBlock_FRQ", theFGDB + "/TechSpeed_TY_FRQ",
                             ["TRANSTECH", "TYPICDOWN", "TYPICUP"], ["FREQUENCY"])
    arcpy.JoinField_management(theFGDB + "/TechSpeed_MA_FRQ", "OBJECTID",
                               theFGDB + "/TechSpeed_TY_FRQ", "OBJECTID")
    # go open up and read this table
    myFile.write("," + "Maximum Advertised Speed,,,,,Typical Speed" + "\n")
    myFile.write("," + "Technology,Download,Upload,Num. Records,,"
                 "Technology,Download,Upload,Num. Records" + "\n")
    myCnt = 658  # 658 is the row in the spreadsheet where this section begins
    for row in arcpy.SearchCursor(theFGDB + "/TechSpeed_MA_FRQ"):
        theMATech = str(row.getValue("TRANSTECH"))
        theMADown = str(row.getValue("MAXADDOWN"))
        theMAUp = str(row.getValue("MAXADUP"))
        theMAFRQ = str(row.getValue("FREQUENCY"))
        theTYTech = str(row.getValue("TRANSTECH_1"))
        theTYDown = str(row.getValue("TYPICDOWN"))
        theTYUp = str(row.getValue("TYPICUP"))
        theTYFRQ = str(row.getValue("FREQUENCY_1"))
        myFile.write("," + theMATech + "," + theMADown + "," + theMAUp + "," + theMAFRQ +
                     ",," + theTYTech + "," + theTYDown + "," + theTYUp + "," + theTYFRQ + "\n")
        myCnt = myCnt + 1
    del theMATech, theMAUp, theMADown, theMAFRQ, row, theTYTech, theTYUp, theTYDown, theTYFRQ
    theFCs = ["CensusBlock", "Address", "RoadSegment", "Wireless",
              "TechSpeed_MA", "TechSpeed_TY"]
    for theFC in theFCs:
        if arcpy.Exists(theFGDB + "/" + theFC + "_FRQ"):
            arcpy.Delete_management(theFGDB + "/" + theFC + "_FRQ")
    # we want the records for the next section on the same row despite the
    # difference in row count
    sbdd_WriteLine(1000 - myCnt)
    del theFC, theFCs

def test_table_to_polygon_w_grouping(self):
    '''Test Table To Polygon using Name field as the grouping Line Field'''
    Configuration.Logger.info(".....TableToPolygonTestCase.test_table_to_polygon_w_grouping")
    # Delete the output feature class if it already exists
    if arcpy.Exists(self.outputPolygons):
        arcpy.Delete_management(self.outputPolygons)
    # Note: tool fails when run with input "Name" and "Vsort" fields as params
    groupingFieldName = 'Name'
    toolOutput = arcpy.TableToPolygon_mt(self.inputTable, "DD_2", "POINT_X", "POINT_Y",
                                         self.outputPolygons, groupingFieldName, "Vsort")
    # 1: Check the expected return value
    self.assertIsNotNone(toolOutput, "No output returned from tool")
    outputOut = toolOutput.getOutput(0)
    self.assertEqual(self.outputPolygons, outputOut, "Unexpected return value from tool")
    self.assertTrue(arcpy.Exists(self.outputPolygons),
                    "Output features do not exist or were not created")
    # Process to check tool results for grouping
    # Step 1: Make in_memory table to get frequency of unique group values
    inMemTable = arcpy.TableToTable_conversion(self.inputTable, "in_memory",
                                               "TableToPolygon_single_In_Mem")
    # Step 2: Get the frequency of unique "group values" in the input table
    freqInputTable = arcpy.Frequency_analysis(inMemTable, "in_memory\\CountOfUniqueNames",
                                              groupingFieldName, "")
    # Get a count of the unique names
    freqTableCount = arcpy.GetCount_management(freqInputTable)
    expectedFeatureCount = int(freqTableCount.getOutput(0))
    polygonCount = int(arcpy.GetCount_management(self.outputPolygons).getOutput(0))
    self.assertEqual(polygonCount, expectedFeatureCount,
                     "Expected %s features, but got %s" %
                     (str(expectedFeatureCount), str(polygonCount)))
    return

def main():
    # Raw strings avoid '\U...' escape errors in Windows paths
    arcpy.env.workspace = r'C:\Users\owner\Downloads\Sample_scripts\ch06'
    inputTable = 'Lagos_GPS_GCPs.shp'
    outputTable = r'C:\Users\owner\Downloads\Sample_scripts\ch06\Orders.dbf'
    freqField = 'Control_Or'
    # summaryField = ['1st Order', '2nd Order']
    arcpy.Frequency_analysis(inputTable, outputTable, freqField)

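# A short sketch of consuming the table main() writes: Frequency_analysis
# produces one row per distinct Control_Or value plus a FREQUENCY count field.
import arcpy

orders = r'C:\Users\owner\Downloads\Sample_scripts\ch06\Orders.dbf'
for value, count in arcpy.da.SearchCursor(orders, ['Control_Or', 'FREQUENCY']):
    print('{0}: {1} records'.format(value, count))
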
def check_unique_id(polygon_fc, candidate_id, table_workspace='in_memory'):
    """Checks if the id you think is unique actually has no duplicates. Returns
    True if the ID is unique, False if there are duplicates. If there are
    duplicates, remove them manually and use this function again when you are
    done to verify success."""
    frequency_table = os.path.join(
        table_workspace,
        os.path.splitext(os.path.basename(polygon_fc))[0] + '_freqtable')
    arcpy.Frequency_analysis(polygon_fc, frequency_table, candidate_id)
    fields = [candidate_id, 'FREQUENCY']
    with arcpy.da.SearchCursor(frequency_table, fields) as cursor:
        print_rows = [fields[0].ljust(80) + fields[1]]
        count = 0
        for row in cursor:
            if row[1] > 1:
                count += 1
                printable = row[0].ljust(80) + str(row[1])
                print_rows.append(printable)
    if count > 0:
        print("WARNING: %s is NOT unique for feature class %s." % (candidate_id, polygon_fc))
        for line in print_rows:
            print(line)
        return False
    else:
        print("Success! You can use the id %s for feature class %s" % (candidate_id, polygon_fc))
        return True

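# Hypothetical call to check_unique_id; the feature class path and candidate
# field are assumptions for illustration only.
lakes = r'C:\data\hydro.gdb\lakes'
if not check_unique_id(lakes, 'lagoslakeid'):
    # remove the duplicates it listed, then verify the fix
    check_unique_id(lakes, 'lagoslakeid')
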
def html_writeFreqTable(outHtml, fc, fields):
    # make frequency table
    fds = os.path.dirname(fc)
    freqTable = fds + 'xxxFreqTable'
    testAndDelete(freqTable)
    if debug1:
        addMsgAndPrint(fc + ' ' + str(fields))
    arcpy.Frequency_analysis(fc, freqTable, fields)
    fieldNames = ['FREQUENCY']
    for afield in fields:
        fieldNames.append(afield)
    if numberOfRows(freqTable) > 0:
        with arcpy.da.SearchCursor(freqTable, fieldNames) as cursor:
            for row in cursor:
                spaceString = ''
                for i in range(len(str(row[0])), 6):
                    spaceString = spaceString + ' '
                outHtml.write('<tt>' + spaceString + ' ' + str(row[0]) + ' ')
                for i in range(1, len(row)):
                    outHtml.write(str(row[i]) + ' ')
                outHtml.write('</tt><br>\n')
    else:
        outHtml.write('<tt> no errors</tt><br>\n')
    if debug2 and numberOfRows(fc) > 0:
        addMsgAndPrint('input fc = ' + fc)
        addMsgAndPrint('input fc field names = ' + str(fieldNameList(fc)))
        addMsgAndPrint('# rows input fc = ' + str(numberOfRows(fc)))
        addMsgAndPrint('frequency fields = ' + str(fields))
        addMsgAndPrint('frequency table = ' + freqTable)
        addMsgAndPrint('freq table field names = ' + str(fieldNameList(freqTable)))
        addMsgAndPrint('# rows freq table = ' + str(numberOfRows(freqTable)))
        addMsgAndPrint(' ')
    testAndDelete(freqTable)

def UniquePointGUIDs(pointFC, guidFieldName):
    countPointGUIDs = 0
    freqTable = os.path.join(TestUtilities.scratchGDB, "freqTable")
    arcpy.Frequency_analysis(pointFC, freqTable, guidFieldName)
    countPointGUIDs = int(arcpy.GetCount_management(freqTable).getOutput(0))
    arcpy.Delete_management(freqTable)
    return countPointGUIDs

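# Sketch of the idiom UniquePointGUIDs relies on: the row count of a one-field
# frequency table equals the number of distinct values, so comparing it to the
# total row count tests uniqueness. The path and field name are assumptions.
import arcpy

pts = r'C:\data\test.gdb\points'
distinct = UniquePointGUIDs(pts, 'UniqueID')
total = int(arcpy.GetCount_management(pts).getOutput(0))
print('IDs unique' if distinct == total else 'duplicate IDs present')
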
def quitaCampos(ws, tabla):
    # -------------------------------------------------
    # Strips helper fields, adds closing-period fields (month/year/zone), then
    # builds a frequency table and appends it to the AC table.
    lt = "T_CC_" + tabla[-4:len(tabla)]
    lr = []
    lista = [f.name for f in arcpy.ListFields(tabla)]
    lr = listaElim(lista, lt)
    if eco:
        imprimir("ADD & CALCULATE, MES_CIERRE, YEAR_CIERRE, ZONA_CIERRE")
    arcpy.AddField_management(tabla, "MES_CIERRE", "TEXT", 12)
    arcpy.AddField_management(tabla, "YEAR_CIERRE", "SHORT")
    arcpy.AddField_management(tabla, "ZONA_CIERRE", "TEXT", 20)
    arcpy.CalculateField_management(tabla, 'MES_CIERRE', "!" + lt + "_Mes_Cierre!", 'PYTHON')
    arcpy.CalculateField_management(tabla, 'ZONA_CIERRE', "!" + lt + "_Zona!", 'PYTHON')
    agno = int(tabla[-4:len(tabla)])
    arcpy.CalculateField_management(tabla, 'YEAR_CIERRE', agno, 'PYTHON')
    if eco:
        imprimir("FIN CALCULATE " + tabla)
    lista = [f.name.upper() for f in arcpy.ListFields(ws + os.path.sep + tabla)]
    if lt + cElim1[0] in lista:
        lr.append(lt + cElim1[0])
    if lt + cElim1[1] in lista:
        lr.append(lt + cElim1[1])
    if len(lr) > 0:
        if eco:
            imprimir("Eliminado Columns")
            imprimir(lr)
        arcpy.DeleteField_management(tabla, lr)
        if eco:
            imprimir("LISTO ELIMINACION")
    else:
        imprimir("=========================")
        imprimir("SIN CAMPOS A ELIMINAR " + tabla)
        imprimir("=========================")
    if eco:
        imprimir("FRECUENCIA CON " + tabla + "\n" + ws + os.path.sep + "f" + tabla)
    eliminarObjeto(ws + os.path.sep + "f" + tabla)
    arcpy.Frequency_analysis(tabla, ws + os.path.sep + "f" + tabla,
                             [nac, "MES_CIERRE", "YEAR_CIERRE"])
    if eco:
        imprimir("LISTO FRECUENCIA CON CAMPOS")
    if arcpy.Exists("f" + tabla):
        # imprimir([f.name for f in arcpy.ListFields("f" + tabla)])
        ## arcpy.AddField_management("f" + tabla, nac, "LONG")
        ## exp = "clng([" + cNR + "_NUM_ACTA_1])"
        ## imprimir(exp)
        ## arcpy.CalculateField_management("f" + tabla, nac, exp, "VB")
        # fm = crearMapping(AC, "f" + tabla, cNR + "_NUM_ACTA_1")
        if eco:
            imprimir("APPEND..." + AC)
        arcpy.Append_management(["f" + tabla], AC, "NO_TEST")  # , fm)
        if eco:
            imprimir("FIN APPEND")

def deduplicate(merged_file, rule_dictionary, unique_id='lagoslakeid'):
    order_fields = []
    sort_fields = []
    for i in range(1, len(rule_dictionary) + 1):  # priority order
        rule = rule_dictionary[i]
        if rule['rule'] == 'min':
            order_fields.append('{} asc'.format(rule['field']))
        elif rule['rule'] == 'max':
            order_fields.append('{} desc'.format(rule['field']))
        else:
            sort_field = '{}_SORT'.format(rule['field'])
            sort_fields.append(sort_field)
            if not arcpy.ListFields(merged_file, sort_field):
                DM.AddField(merged_file, sort_field, 'SHORT')
            # Calculate new sort field with numeric order based on custom sort order
            with arcpy.da.UpdateCursor(merged_file, [rule['field'], sort_field]) as cursor:
                for row in cursor:
                    row[1] = rule['sort'].index(row[0])
                    cursor.updateRow(row)
            order_fields.append('{} asc'.format(sort_field))
    order_by_clause = 'ORDER BY {}'.format(', '.join(order_fields))
    print(order_by_clause)

    print("Finding duplicate ids...")
    freq = arcpy.Frequency_analysis(merged_file, 'in_memory/freq', unique_id)
    dupe_ids = [row[0] for row in
                arcpy.da.SearchCursor(freq, unique_id, '''"FREQUENCY" > 1''')]
    for id in dupe_ids:
        if arcpy.ListFields(merged_file, '{}*'.format(unique_id))[0].type == 'String':
            filter = '''{} = '{}' '''.format(unique_id, id)
        else:
            filter = '''{} = {} '''.format(unique_id, id)
        with arcpy.da.UpdateCursor(merged_file, '*', filter,
                                   sql_clause=(None, order_by_clause)) as dupes_cursor:
            counter = 0
            # Deletes all but the first sorted row.
            for dupe_row in dupes_cursor:
                print(dupe_row)
                time.sleep(.1)
                if counter != 0:
                    print("DUPLICATE")
                    dupes_cursor.deleteRow()
                counter += 1
        print(' ')
    arcpy.Delete_management('in_memory/freq')
    for f in sort_fields:
        DM.DeleteField(merged_file, f)

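# A hypothetical rule_dictionary for deduplicate(): keys are 1-based priority;
# each rule either keeps the min/max of a field or ranks values by a custom
# 'sort' list (any 'rule' value other than 'min'/'max' takes the custom branch).
# Field names and values below are assumptions.
rules = {
    1: {'field': 'FDate', 'rule': 'max'},                # keep the most recent
    2: {'field': 'FType', 'rule': 'custom',
        'sort': ['LakePond', 'Reservoir', 'IceMass']},   # prefer earlier entries
}
deduplicate('merged_lakes', rules, unique_id='lagoslakeid')
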
def further_process_blended():
    env.workspace = outBlendedWS
    env.overwriteOutput = True
    GISDBASCL = r'S:\LV_Valley_Imagery\2017\SwimmingPool2017\gdb\general_data.gdb\GISDBA_SCL_STREETS'
    fcs = arcpy.ListFeatureClasses()
    arcpy.MakeFeatureLayer_management(projectAreaTiles, 'TileClipLayer')
    for fc in fcs:
        print('clipping ' + fc)
        arcpy.MakeFeatureLayer_management(fc, 'lyr')
        arcpy.AddField_management('lyr', 'YARD', 'TEXT', '', '', '5')
        arcpy.AddField_management('lyr', 'TILENAME', 'Text', '', '', '8')
        arcpy.AddField_management('lyr', 'ERROR_TYPE', 'SHORT')
        arcpy.SelectLayerByAttribute_management('TileClipLayer', 'NEW_SELECTION',
                                                "BOOKSEC_PT = 'o" + fc[4:] + "'")
        arcpy.Clip_analysis(fc, 'TileClipLayer', outClippedBlendedWS + '\\' + fc + '_Clip')
        arcpy.SelectLayerByAttribute_management('TileClipLayer', 'CLEAR_SELECTION')
    env.workspace = outClippedBlendedWS
    env.overwriteOutput = True
    fcs = arcpy.ListFeatureClasses()
    arcpy.MakeFeatureLayer_management(projectAreaParcels, 'ProjAreaAOXLyr')
    arcpy.MakeFeatureLayer_management(GISDBASCL, 'GISDBA_SCL_STREETS')
    for fc in fcs:
        print("Performing Identity and Near Analysis on " + fc + "_Id")
        arcpy.Identity_analysis(fc, 'ProjAreaAOXLyr',
                                outClippedBlendedIDWS + '\\' + fc + '_Id',
                                'ALL', '', 'NO_RELATIONSHIPS')
        arcpy.Near_analysis(outClippedBlendedIDWS + '\\' + fc + '_Id',
                            'GISDBA_SCL_STREETS', "300 Feet", "LOCATION",
                            "NO_ANGLE", "PLANAR")
    env.workspace = outClippedBlendedIDWS
    env.overwriteOutput = True
    arcpy.MakeFeatureLayer_management(GISDBASCL, 'GISDBA_SCL_STREETS')
    fcs = arcpy.ListFeatureClasses()
    for fc in fcs:
        print("calculating frequency and stats on " + fc)
        arcpy.MakeFeatureLayer_management(fc, 'lyr')
        arcpy.AddJoin_management('lyr', "NEAR_FID", 'GISDBA_SCL_STREETS',
                                 'OBJECTID', 'KEEP_ALL')
        arcpy.Frequency_analysis(
            'lyr', outClippedBlendedIDWS + '\\' + fc[:-8] + '_Frequen',
            '"{}.gridcode;{}.APN"'.format(fc, fc),
            '"{}.Shape_Area"'.format(fc))
        arcpy.Statistics_analysis(
            outClippedBlendedIDWS + '\\' + fc[:-8] + '_Frequen',
            outClippedBlendedIDWS + '\\' + fc[:-8] + '_TOTAREA',
            "FREQUENCY COUNT;" + "{i}_Shape_Area SUM".format(i=fc),
            "{x}_APN".format(x=fc))

def flatten_poly_fc(in_layer_path, out_gdb_path, query=None):
    '''Check for overlaps and flatten, super region poly knockoff,
    POLYID joins back to original data'''
    try:
        log("Flattening {} due to overlaps".format(in_layer_path))
        in_layer_nm = os.path.splitext(os.path.basename(in_layer_path))[0]
        shattered_fc = os.path.join(out_gdb_path, in_layer_nm + "_shattered")
        if query:
            log("We have a query: {}".format(query))
            f_lyr = "f_lyr"
            arcpy.MakeFeatureLayer_management(in_layer_path, f_lyr, where_clause=query)
            arcpy.Union_analysis(f_lyr, shattered_fc, "ALL", "", "GAPS")
            log(arcpy.GetMessages())
        else:
            arcpy.Union_analysis(in_layer_path, shattered_fc, "ALL", "", "GAPS")
            log(arcpy.GetMessages())
        shattered_singlepart_fc = os.path.join(out_gdb_path,
                                               in_layer_nm + "_shattered_singlepart")
        arcpy.MultipartToSinglepart_management(shattered_fc, shattered_singlepart_fc)
        log(arcpy.GetMessages())
        polyid_field_nm = "POLYID"
        arcpy.AddField_management(shattered_singlepart_fc, polyid_field_nm, "LONG")
        log(arcpy.GetMessages())
        # Assign the same POLYID to geometrically identical singlepart features,
        # matched on rounded centroid XY and area
        polyid_dict = {}
        polyid_value = 1
        decimal_tolerance = 2
        field_list = ["OID@", "SHAPE@XY", "SHAPE@AREA", polyid_field_nm]
        update_rows = arcpy.da.UpdateCursor(shattered_singlepart_fc, field_list)
        for row in update_rows:
            axyvalue = (round(row[1][0], decimal_tolerance),
                        round(row[1][1], decimal_tolerance),
                        round(row[2], decimal_tolerance))
            if axyvalue not in polyid_dict:
                polyid_dict[axyvalue] = polyid_value
                polyid_value = polyid_value + 1
            row[3] = polyid_dict[axyvalue]
            update_rows.updateRow(row)
        del row, update_rows
        del polyid_dict
        final_fc = os.path.join(out_gdb_path, in_layer_nm + "_flattened")
        try:
            arcpy.Dissolve_management(shattered_singlepart_fc, final_fc,
                                      polyid_field_nm, "", "SINGLE_PART")
            log(arcpy.GetMessages())
        except:
            log("Failed initial Dissolve, repairing geometry and trying again")
            arcpy.RepairGeometry_management(shattered_singlepart_fc)
            log(arcpy.GetMessages())
            arcpy.Dissolve_management(shattered_singlepart_fc, final_fc,
                                      polyid_field_nm, "", "SINGLE_PART")
            log(arcpy.GetMessages())
        log("Creating POLYID lookup table")
        polyid_fc = os.path.join(out_gdb_path, in_layer_nm + "_polyid")
        fid_field = next(i.name for i in arcpy.ListFields(shattered_singlepart_fc)
                         if "FID" in i.name)
        arcpy.Frequency_analysis(shattered_singlepart_fc, polyid_fc,
                                 "POLYID;{}".format(fid_field), "")
        log(arcpy.GetMessages())
        arcpy.AddField_management(polyid_fc, "flattened_POLYID", "LONG")
        log(arcpy.GetMessages())
        arcpy.CalculateField_management(polyid_fc, "flattened_POLYID", "!POLYID!", "PYTHON")
        log(arcpy.GetMessages())
        arcpy.DeleteField_management(polyid_fc, "FREQUENCY;POLYID")
        log(arcpy.GetMessages())
        log("Successful finish to flattening routine")
        return [final_fc, polyid_fc]
    except Exception as e:
        log("EXCEPTION hit: {}".format(e))

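# Hedged example of running flatten_poly_fc; the paths and query are
# assumptions. It returns the flattened feature class plus a POLYID lookup
# table for joining results back to the original rows.
results = flatten_poly_fc(r'C:\data\wetlands.gdb\wetland_polys',
                          r'C:\data\scratch.gdb', query="ACRES > 0")
if results:
    final_fc, polyid_lookup = results
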
def sbdd_RecordDetail(theFC, theField):
    myCnt = int(arcpy.GetCount_management(theFC).getOutput(0))
    myFile.write(",Records Detail,Total Records," + str(myCnt) + "\n")
    sbdd_WriteLine("1")
    if theField != "NOFIELD":
        if arcpy.Exists(theFGDB + "/sbdd_FRQ"):
            arcpy.Delete_management(theFGDB + "/sbdd_FRQ")
        arcpy.Frequency_analysis(theFC, theFGDB + "/sbdd_FRQ", theField)
        myCnt = int(arcpy.GetCount_management(theFGDB + "/sbdd_FRQ").getOutput(0))
        myFile.write(",,Total Count of " + theField + "," + str(myCnt) + "\n")
        if arcpy.Exists(theFD + "/sbdd_FRQ"):
            arcpy.Delete_management(theFD + "/sbdd_FRQ")
    del myCnt, theFC, theField
    return ()

def _get_unique_routes(self):
    print(":: getting unique route names")
    routes = set()
    arcpy.Frequency_analysis(self._input.roads_feature,
                             self._output._intermediate_frequency_output,
                             self._fields.route_name)
    with SearchCursor(self._output._intermediate_frequency_output,
                      self._fields.route_name) as cursor:
        for row in cursor:
            routes.add(row[0])
    return routes

def test_table_to_polyline(self):
    '''Test Table To Polyline for ArcGIS Desktop'''
    Configuration.Logger.info(".....TableToPolylineTestCase.test_table_to_polyline")
    # Delete the output feature class if it already exists
    if arcpy.Exists(self.outputPolylines):
        arcpy.Delete_management(self.outputPolylines)
    toolOutput = arcpy.TableToPolyline_mt(self.inputTable, "DD_2", "POINT_X", "POINT_Y",
                                          self.outputPolylines, "Group_")
    # 1: Check the expected return value
    self.assertIsNotNone(toolOutput, "No output returned from tool")
    outputOut = toolOutput.getOutput(0)
    self.assertEqual(self.outputPolylines, outputOut, "Unexpected return value from tool")
    self.assertTrue(arcpy.Exists(self.outputPolylines),
                    "Output features do not exist or were not created")
    # Process to check tool results for grouping
    # Step 1: Make in_memory table to get frequency of unique group values
    inMemTable = arcpy.TableToTable_conversion(self.inputTable, "in_memory",
                                               "TableToPolyline_single_In_Mem")
    # Step 2: Get the frequency of unique "group values" in the input table
    freqInputTable = arcpy.Frequency_analysis(inMemTable, "in_memory\\CountOfUniqueNames",
                                              "Group_", "")
    # Get a count of the unique names
    freqTableCount = arcpy.GetCount_management(freqInputTable)
    expectedFeatureCount = int(freqTableCount.getOutput(0))
    polylineCount = int(arcpy.GetCount_management(self.outputPolylines).getOutput(0))
    self.assertEqual(polylineCount, expectedFeatureCount,
                     "Expected %s features, but got %s" %
                     (str(expectedFeatureCount), str(polylineCount)))
    # Tool is not producing correct output - commented out check for now
    # See: https://github.com/Esri/military-tools-geoprocessing-toolbox/issues/254
    # self.assertFeatureClassEqualSimple(self.baseFC, self.outputPolylines,
    #                                    "OBJECTID", 0.0001)
    return

def sbdd_ProviderDetail(theFC):
    myFile.write(",Service Provider Details \n")
    theFields = ["PROVNAME", "DBANAME", "FRN"]
    for f in theFields:
        myCnt = 0
        if arcpy.Exists(theFGDB + "/sbdd_FRQ"):
            arcpy.Delete_management(theFGDB + "/sbdd_FRQ")
        arcpy.Frequency_analysis(theFC, theFGDB + "/sbdd_FRQ", f)
        myCnt = int(arcpy.GetCount_management(theFGDB + "/sbdd_FRQ").getOutput(0))
        myFile.write(",,Total Count of distinct " + f + "," + str(myCnt) + "\n")
    if arcpy.Exists(theFGDB + "/sbdd_FRQ"):
        arcpy.Delete_management(theFGDB + "/sbdd_FRQ")
    del theFC, myCnt
    return ()

def DissolveFC(inFC, outFC, fields, delLst=None, i=1):
    tmpFC = "{0}_{1}".format(outFC, i)
    DeleteExists(tmpFC)
    arcpy.Dissolve_management(inFC, tmpFC, fields, multi_part='MULTI_PART',
                              unsplit_lines="DISSOLVE_LINES")
    tmpTab = os.path.join(arcpy.env.scratchGDB, "zzFreq")
    DeleteExists(tmpTab)
    arcpy.Frequency_analysis(tmpFC, tmpTab, fields)
    freq = 1
    try:
        freq = int(next(arcpy.da.SearchCursor(tmpTab, ['FREQUENCY'],
                                              "FREQUENCY > 1"))[0])
    except:
        freq = 1
    if freq > 1:
        print('We have a tiled output....')
        if i > 5:
            print('We have run more than 5 times, we will stop and return tiled output....')
            for tmp in delLst:
                print('Deleting temp data....{0}'.format(tmp))
                DeleteExists(tmp)
            print('Renaming temp data to final output....')
            arcpy.Rename_management(tmpFC, outFC)
            return outFC
        else:
            print('Going to run dissolve again....{0}:{1}'.format(tmpFC, outFC))
            if not delLst is None:
                delLst.append(tmpFC)
            else:
                delLst = [tmpFC]
            i += 1
            return DissolveFC(tmpFC, outFC, fields, delLst, i)
    else:
        if not delLst is None:
            for tmp in delLst:
                print('Deleting temp data....{0}'.format(tmp))
                DeleteExists(tmp)
        print('Renaming temp data to final output....')
        arcpy.Rename_management(tmpFC, outFC)
        return outFC

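# Hedged example of DissolveFC on tiled linework; paths and fields are
# assumptions. Repeated dissolve passes merge features split along tile seams
# until the frequency check finds no remaining duplicates (or 5 passes elapse).
out = DissolveFC(r'C:\data\roads.gdb\roads_tiled',
                 r'C:\data\roads.gdb\roads_dissolved',
                 ['ROUTE_ID', 'ROAD_CLASS'])
print('Dissolved output: {0}'.format(out))
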
def UniqueValueToDomain(Workspace, Table, Field_Name, Domain_Name):
    arcpy.env.overwriteOutput = True
    tempFRQTable = 'IN_MEMORY/FRQ'
    tempFRQView = "frq_View"
    InputField = FindField(Table, str(Field_Name))
    # arcpy.AddMessage(InputField.type)
    if (InputField.type in [u'SmallInteger', u'Double', u'Long', u'OID',
                            u'Single', u'Integer']):
        notEmptySQL = """"Code" IS NULL"""
    else:
        notEmptySQL = """"Code" IS NULL OR "Code" = ''"""
    # Process: Frequency
    arcpy.Frequency_analysis(Table, tempFRQTable, Field_Name)
    # Process: AddField
    arcpy.AddField_management(tempFRQTable, "Description", "TEXT")
    arcpy.AddField_management(tempFRQTable, "Code", InputField.type,
                              InputField.precision, InputField.scale,
                              InputField.length)
    # Process: CalculateField
    arcpy.CalculateField_management(tempFRQTable, "Description",
                                    "[" + Field_Name + "]", "VB", "")
    arcpy.CalculateField_management(tempFRQTable, "Code",
                                    "[" + Field_Name + "]", "VB", "")
    # Delete empty codes
    arcpy.MakeTableView_management(tempFRQTable, tempFRQView)
    arcpy.SelectLayerByAttribute_management(tempFRQView, "NEW_SELECTION", notEmptySQL)
    arcpy.DeleteRows_management(tempFRQView)
    # Process: TableToDomain
    arcpy.TableToDomain_management(tempFRQView, "Code", "Description", Workspace,
                                   Domain_Name, "Description", "REPLACE")
    # Process: AssignDomainToField
    arcpy.AssignDomainToField_management(Table, Field_Name, Domain_Name)

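# Hedged usage of UniqueValueToDomain: promote the distinct values of a field
# to a coded-value domain and assign the domain to that field. The geodatabase,
# table, and field names are assumptions.
gdb = r'C:\data\inventory.gdb'
UniqueValueToDomain(gdb, gdb + r'\roads', 'SURFACE_TYPE', 'dom_SurfaceType')
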
def updateParameters(self, params):
    params[0].value = "CR-RG-17-xxx R"
    # Populate list of range allotment IDs
    arcpy.Frequency_analysis('Range_Allotment_Polygons', 'in_memory\\freq', 'ALLOT_NO')
    valueList = []
    with arcpy.da.SearchCursor('in_memory\\freq', 'ALLOT_NO',
                               '"ALLOT_NO" IS NOT NULL') as cursor:
        for row in cursor:
            valueList.append(row[0])
    valueList.sort()
    params[2].filter.type = "ValueList"
    params[2].filter.list = valueList
    return

def sbdd_FRQ(theFD, theFC):
    arcpy.AddMessage(" Beginning " + theFC + " Processing")
    myTbls = [theFC + "_frq", theFC + "_" + theST + "_frq"]
    for myTbl in myTbls:
        if arcpy.Exists(myTbl):
            arcpy.Delete_management(myTbl)
    del myTbl, myTbls
    theFields = ["FRN", "PROVNAME", "DBANAME", "TRANSTECH", "MAXADDOWN",
                 "MAXADUP", "TYPICDOWN", "TYPICUP"]  # ,"SPECTRUM"]
    theSTexp = "'" + theST + "'"
    if int(arcpy.GetCount_management(theFD + "/" + theFC).getOutput(0)) > 1:
        arcpy.Frequency_analysis(theFD + "/" + theFC, theFC + "_frq", theFields, "")
        arcpy.AddField_management(theFC + "_frq", "State", "TEXT", "", "", 2)
        arcpy.CalculateField_management(theFC + "_frq", "State", theSTexp, "PYTHON")
        arcpy.Rename_management(theFC + "_frq", theFC + "_" + theST + "_frq")
    del theSTexp, theFields
    return ()

def lightningDensity(clip_feature, grid_feature, cell_size):
    # if arcpy.Exists("lightningDensity.tif"):
    #     return
    intersect_feature = ''.join(["intersect", str(cell_size)])
    arcpy.Intersect_analysis([clip_feature, grid_feature], intersect_feature)
    frequency_table = ''.join(["intersect", str(cell_size), "_frequency"])
    field_frequency = ''.join(["FID_GRID", str(cell_size)])
    arcpy.Frequency_analysis(intersect_feature, frequency_table, field_frequency)
    layer = ''.join(["GRID", str(cell_size)])
    arcpy.MakeFeatureLayer_management(grid_feature, layer)
    field_join = ''.join(["FID_GRID", str(cell_size)])
    arcpy.AddJoin_management(layer, "OID", frequency_table, field_join)
    density_points = "densityPoints"
    arcpy.FeatureToPoint_management(layer, density_points)
    # Spline comes from arcpy.sa (requires the Spatial Analyst extension)
    lightning_density_raster = Spline(density_points, "FREQUENCY")
    lightning_density_raster.save("lightningDensity")

def onClick(self):
    # show progress of statistics calculation:
    with pythonaddins.ProgressDialog() as dialog:
        dialog.title = "Progress Dialog"
        dialog.description = "Calculating Statistics..."
        dialog.animation = "Spiral"
        # temporarily allow overwriting of existing datasets, then disallow
        arcpy.env.overwriteOutput = True
        output_table = r"C:\Users\s\Documents\Masters of Geospatial\GISP\Assignment2\GISdata\output_table.csv"
        arcpy.Frequency_analysis(selected_lyr, output_table, [selected_field])
        arcpy.env.overwriteOutput = False
        with open(output_table) as csvfile:
            reader = csv.reader(csvfile, delimiter=",")
            next(reader)
            total = 0
            print("Occurrences of each value for the field ({}) are as follows:".format(selected_field))
            for row in reader:
                total = total + int(row[1])
                print("The value {0} occurs {1} times".format(row[2], row[1]))
            print("Total features are: {}".format(total))
            csvfile.seek(0)
            next(reader)
            for row in reader:
                value = float(row[1])
                total = float(total)
                percentage = value / total * 100
                print("The value {} is a {:.2f} percentage of the total occurrences".format(row[2], percentage))

# buggyFreq.py
# Purpose: Find frequency of each value in each string field
import arcpy

arcpy.env.overwriteOutput = True
# A raw string avoids '\U...' escape errors in the Windows path
arcpy.env.workspace = r'C:\Users\owner\Downloads\Sample_scripts\ch06\shapefiles'
featureList = arcpy.ListFeatureClasses()
for inputFile in featureList:
    fields = arcpy.ListFields(inputFile, '*', 'String')
    for field in fields:
        fieldName = field.name
        # strip the '.shp' extension so the output is a valid dBASE table name
        outTable = inputFile[:-4] + fieldName + 'freq.dbf'
        arcpy.Frequency_analysis(inputFile, outTable, fieldName)
        print('Output table created: {0}'.format(outTable))

    for row in rows:
        currentURL = row.csUriStem
        levRowCol = currentURL.split("/")
        theList = levRowCol[-3:]
        row.LevelID = theList[0]
        row.RowID = theList[1]
        row.ColumnID = theList[2]
        row.TileID = theList[0] + theList[1] + theList[2]
        rows.updateRow(row)
    del row
    del rows
    currentURL = ""

    arcpy.Frequency_analysis(TilingScheme_gdb + '/' + tileHits,
                             TilingScheme_gdb + '/' + Frequency, FrequencyField, "")
    arcpy.AddIndex_management(TilingScheme_gdb + '/' + Frequency, "TileID",
                              "TileHit_Index", "UNIQUE", "NON_ASCENDING")
    arcpy.AddMessage("Done!")

except:
    # Get the traceback object
    #
    tb = sys.exc_info()[2]
    tbinfo = traceback.format_tb(tb)[0]
    # Concatenate information together concerning the error into a
    # message string
    #
    pymsg = tbinfo + "\n" + str(sys.exc_type) + ": " + str(sys.exc_value)
    # Return python error messages for use with a script tool

"ALL", "1 Feet", "NO_RELATIONSHIPS") arcpy.Identity_analysis(r'in_memory\aguse', SoilLayer, r'in_memory\aguse_soil', "ALL", "1 Feet", "NO_RELATIONSHIPS") arcpy.AddField_management(r'in_memory\aguse_soil', "ACRES", "DOUBLE", "#", "1", "#", "#", "NULLABLE", "NON_REQUIRED", "#") arcpy.CalculateField_management(r'in_memory\aguse_soil', "ACRES", "!shape.area@acres!", "PYTHON_9.3", "#") #Create Frequency table using (TYPE, MUSYM and Sum byACRES). Then Add Frequency table to mxd. arcpy.Frequency_analysis(r'in_memory\aguse_soil', r'in_memory\Freq', "TYPE;MUSYM", "ACRES") Freq = arcpy.mapping.TableView(r'in_memory\Freq') arcpy.mapping.AddTableView(df, Freq) #Create Frequency table from first frequency table using (TYPE and Sum by ACRES). Then Add Frequency table to mxd. arcpy.Frequency_analysis(r'in_memory\aguse_soil', r'in_memory\Freq1', "TYPE", "ACRES") Freq1 = arcpy.mapping.TableView(r'in_memory\Freq1') #Append PIN to error log if the parcel is not completely covered by ag use or soil layer. freqCursorFields = ["TYPE", "MUSYM"] freqCursor = arcpy.da.SearchCursor(r'in_memory\Freq', freqCursorFields) try: badpins = open(badpinslocation, 'a')
OriginsSubLayer = outNALayer.listLayers(originsLayerName)[0]
DestinationsSubLayer = outNALayer.listLayers(destinationsLayerName)[0]
LinesSubLayer = outNALayer.listLayers(subLayerNames["ODLines"])[0]
# select lines by attribute
arcpy.SelectLayerByAttribute_management(LinesSubLayer, "NEW_SELECTION",
                                        '"Total_Miles" <= 2')
# write selected line features from the NA sublayer to a new feature class
arcpy.CopyFeatures_management(LinesSubLayer, scenario + "lines_lt2mi")
# write origin features from the NA sublayer to a new feature class
arcpy.CopyFeatures_management(OriginsSubLayer, scenario + "origins")
# calculate frequency of destinations reached by each origin point
arcpy.Frequency_analysis(scenario + "lines_lt2mi", scenario + "lt2mi_FREQUENCY",
                         "OriginID", "DestinationID")
print("Frequency table calculated.")
# join the frequency field back to the origin points
arcpy.management.JoinField(scenario + "origins", "OBJECTID",
                           scenario + "lt2mi_FREQUENCY", "OriginID", "FREQUENCY")
## # for the spatial join, create a new fieldmappings object and add the two input feature classes
## fieldmappings = arcpy.FieldMappings()
## fieldmappings.addTable(parcel_lyr)
## fieldmappings.addTable(scenario + "origins")
##
## # first get the FREQUENCY fieldmap, just joined to the origins layer.

nearSignalReqPhb = workspace + "Street_Near_Signal_Req_PHB"
nearSignalReqPhbFreq = workspace + "Street_Near_Signal_Req_PHB_Freq"
nearStreetlight = workspace + "Street_Near_Streetlight"
nearSignal = workspace + "Street_Near_Signal"
nearPedCrash2 = workspace + "Street_Near_Ped_Crash_200"
nearPedCrash2Freq = workspace + "Street_Near_Ped_Crash_200_Freq"
nearPedCrash4 = workspace + "Street_Near_Ped_Crash_400"
nearPedCrash4Freq = workspace + "Street_Near_Ped_Crash_400_Freq"

# ***LARGE RETAIL***
print("\n" + "Large Retail: Generate Near Table, Frequency, Add Index")
arcpy.GenerateNearTable_analysis(streetSelect, largeRetail, nearLargeRetail,
                                 "100 feet", "NO_LOCATION", "NO_ANGLE", "ALL",
                                 4, "PLANAR")
print("\n" + arcpy.GetMessages())
arcpy.Frequency_analysis(nearLargeRetail, nearLargeRetailFreq, "IN_FID", "")
print("\n" + arcpy.GetMessages())
arcpy.AddIndex_management(nearLargeRetailFreq, ["IN_FID"], "LargeRetailInd",
                          "UNIQUE", "ASCENDING")
print("\n" + arcpy.GetMessages())

# ***SCHOOLS***
print("\n" + "Schools: Generate Near Table, Frequency, Add Index")
arcpy.GenerateNearTable_analysis(streetSelect, school, nearSchool,
                                 "100 feet", "NO_LOCATION", "NO_ANGLE", "ALL",
                                 4, "PLANAR")
print("\n" + arcpy.GetMessages())
arcpy.Frequency_analysis(nearSchool, nearSchoolFreq, "IN_FID", "")
print("\n" + arcpy.GetMessages())
arcpy.AddIndex_management(nearSchoolFreq, ["IN_FID"], "SchoolInd",
                          "UNIQUE", "ASCENDING")
print("\n" + arcpy.GetMessages())

def routePartMerge(inputRoutesMZ, outGDB, outLayerName, lrsSchemaTemplate):
    """Route part merging. Merges route parts into one feature.
    Populates LRS attributes."""
    arcpy.AddMessage("Merging route parts")
    inRouteLayer = inputRoutesMZ
    outPath = outGDB
    outLayer = outLayerName
    outLayerTemplate = lrsSchemaTemplate
    inRouteDesc = arcpy.Describe(inRouteLayer)
    inFcShpField = inRouteDesc.ShapeFieldName
    inSpatialRef = inRouteDesc.spatialReference
    interstateRts = ["15", "70", "80", "84", "215"]
    usRts = ["6", "191", "89", "50", "89A", "491", "163", "40", "189", "90"]
    institutionalRts = ["284", "285", "286", "291", "292", "293", "294", "296",
                        "298", "299", "303", "304", "309", "312", "317", "320"]
    print("settings complete")
    outLayer = arcpy.CreateFeatureclass_management(outPath, outLayer, "",
                                                   outLayerTemplate, "ENABLED",
                                                   "DISABLED", inSpatialRef)
    # Add route name field to input routes
    arcpy.AddField_management(inRouteLayer, "RtNumber", "TEXT", "", "", "15")
    # Calc new field to route name with direction
    arcpy.CalculateField_management(inRouteLayer, "RtNumber",
                                    """!ScrptRtID!.split("_")[0]""", "PYTHON_9.3")
    # Build unique table based on route_Direction field
    arcpy.Frequency_analysis(inRouteLayer, os.path.join(outPath, "Freq_out"), "RtNumber")
    # init cursor for freq table
    frequencyCursor = arcpy.SearchCursor(os.path.join(outPath, "Freq_out"))
    # init cursors and combine route parts
    outFeatureCursor = arcpy.InsertCursor(outLayer)
    # iterate through unique table
    for uniqueRtNum in frequencyCursor:
        # print(uniqueRtNum.getValue("RtNumber"))
        # select by route_dir, sort by part num
        inRtCursor = arcpy.SearchCursor(
            inRouteLayer,
            "\"RtNumber\" = '" + uniqueRtNum.getValue("RtNumber") + "'",
            "", "", "RtNumber A")
        outRow = outFeatureCursor.newRow()
        newShpArray = arcpy.Array()
        previousPnt = arcpy.Point(0, 0, 0, 0)
        featureCount = 0
        for routePart in inRtCursor:  # feature
            # Get field data from route part and add it to out table
            if featureCount == 0:
                # print("set RtName: " + str(routePart.getValue("RtNumber")))
                outRow.setValue("LABEL", str(routePart.getValue("RtNumber")))
                outRow.setValue("RT_NAME", str(routePart.getValue("RtNumber"))[:4])
                outRow.setValue("RT_DIR", str(routePart.getValue("RtNumber"))[-1:])
                outRow.setValue("RT_TYPE", "M")
                # remove leading zeros from route number
                num = str(routePart.getValue("RtNumber"))[:4]
                while num.find("0") == 0:
                    num = num[1:]
                # Type labeling
                if interstateRts.count(num) > 0:
                    outRow.setValue("RT_MINDESC", "I " + num)
                    outRow.setValue("CARTO", "1")
                elif usRts.count(num) > 0:
                    outRow.setValue("RT_MINDESC", "US " + num)
                    outRow.setValue("CARTO", "2")
                elif institutionalRts.count(num) > 0:
                    outRow.setValue("RT_MINDESC", "SR " + num)
                    outRow.setValue("CARTO", "I")
                elif int(num) >= 1000:
                    outRow.setValue("RT_MINDESC", "FA " + num)
                    outRow.setValue("CARTO", "9")
                else:
                    outRow.setValue("RT_MINDESC", "SR " + num)
                    outRow.setValue("CARTO", "3")
            rtPartShape = routePart.SHAPE
            featurePartCount = 0
            for featurePart in rtPartShape:  # feature part array
                if featureCount == 0 and featurePartCount == 0:  # first feature test
                    newShpArray.add(featurePart)
                elif previousPnt.disjoint(featurePart.getObject(0)):
                    # print("prev: " + str(previousPnt.X) + " next: " + str(featurePart.getObject(0).X))
                    newShpArray.add(featurePart)
                else:
                    featurePart.remove(0)
                    newShpArray.getObject(newShpArray.count - 1).extend(featurePart)
                featurePartCount += 1
            lastArrayAddedToNewShp = newShpArray.getObject(newShpArray.count - 1)
            previousPnt = lastArrayAddedToNewShp.getObject(lastArrayAddedToNewShp.count - 1)
            # print("FPC = " + str(featurePartCount))
            featureCount += 1
        # print("FC = " + str(featureCount))
        # build new feature in out layer
        newShp = arcpy.Polyline(newShpArray, inSpatialRef)
        outRow.setValue(inFcShpField, newShp)
        outFeatureCursor.insertRow(outRow)
    try:
        del outRow
        del outFeatureCursor
        del inRtCursor
        del frequencyCursor
        arcpy.Delete_management(os.path.join(outPath, "Freq_out"))
    except:
        print("Some temporary layers did not delete")
    print("Complete")

def deduplicate_nhd(in_feature_class, out_feature_class='',
                    unique_id='Permanent_Identifier'):
    """
    Returns a single feature class for all NHD features with no duplicated
    identifiers in it.
    :param in_feature_class: A feature class resulting from merging features
    from NHD datasets staged by subregion.
    :param out_feature_class: Optional. The feature class which will be created.
    :param unique_id: Optional. The identifier that needs to be unique in the output.
    :return:
    """
    # SETUP
    if out_feature_class:
        arcpy.AddMessage("Copying initial features to output...")
        arcpy.CopyFeatures_management(in_feature_class, out_feature_class)
    else:
        out_feature_class = in_feature_class

    # EXECUTE
    # Delete full identicals first--these come from overlaps in staged subregion data
    before_count = int(arcpy.GetCount_management(out_feature_class).getOutput(0))
    arcpy.AddMessage("Deleting full identicals...")
    # Check for full identicals on the original *attribute fields*, excluding
    # the one we specifically created to make them distinct, and the object ID,
    # which is always distinct
    excluded_fields = ['Shape', 'Shape_Length', 'Shape_Area', 'OBJECTID', 'nhd_merge_id']
    check_fields = [f.name for f in arcpy.ListFields(out_feature_class)
                    if f.name not in excluded_fields]
    arcpy.DeleteIdentical_management(out_feature_class, check_fields)
    after_full_count = int(arcpy.GetCount_management(out_feature_class).getOutput(0))
    arcpy.AddMessage("{0} features were removed because they were full identicals"
                     " to remaining features.".format(before_count - after_full_count))

    # Delete duplicated IDs by taking the most recent FDate--these come from
    # the NHD editing process somehow
    arcpy.AddMessage("Deleting older features with duplicated identifiers...")
    # Get a list of distinct IDs that have duplicates
    arcpy.Frequency_analysis(out_feature_class, "in_memory/freqtable", unique_id)
    arcpy.TableSelect_analysis("in_memory/freqtable", "in_memory/dupeslist",
                               '''"FREQUENCY" > 1''')
    count_dupes = int(arcpy.GetCount_management("in_memory/dupeslist").getOutput(0))
    # If there are any duplicates, remove them by keeping the one with the latest FDate
    if count_dupes > 0:
        dupe_ids = [row[0] for row in
                    arcpy.da.SearchCursor("in_memory/dupeslist", (unique_id))]
        dupe_filter = ''' "{}" = '{{}}' '''.format(unique_id)
        for id in dupe_ids:
            dates = [row[0] for row in
                     arcpy.da.SearchCursor(out_feature_class, ["FDate"],
                                           dupe_filter.format(id))]
            with arcpy.da.UpdateCursor(out_feature_class, [unique_id, "FDate"],
                                       dupe_filter.format(id)) as cursor:
                for row in cursor:
                    if row[1] == max(dates):
                        pass
                    else:
                        cursor.deleteRow()
        after_both_count = int(arcpy.GetCount_management(out_feature_class).getOutput(0))
        arcpy.AddMessage("{0} features were removed because they were less recently"
                         " edited than another feature with the same identifier."
                         .format(after_full_count - after_both_count))
    arcpy.Delete_management("in_memory/freqtable")
    arcpy.Delete_management("in_memory/dupeslist")

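# Hedged usage of deduplicate_nhd on a merged NHDWaterbody layer; the paths
# are assumptions. Omitting out_feature_class deduplicates in place.
deduplicate_nhd(r'C:\data\nhd.gdb\NHDWaterbody_merged',
                out_feature_class=r'C:\data\nhd.gdb\NHDWaterbody_dedup')
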