def execute(self, params, messages):
    """Build the Marxan bound.dat boundary-length file for the planning units.

    params[0] is read as the Marxan database folder path. The planning-unit
    shapefile is expected at <MarxanDB>/pulayer/pulayer.shp and the
    tab-delimited boundary table is written to <MarxanDB>/input/bound.dat
    with columns id1, id2, boundary.
    """
    MarxanDB = params[0].valueAsText
    # Keep the intermediate neighbor table out of the file system.
    arcpy.env.workspace = "in_memory"
    pulayer = os.path.join(MarxanDB, "pulayer", "pulayer.shp")
    # Shared-boundary lengths between planning units, keyed by the "id" field.
    poly_neighbors = arcpy.PolygonNeighbors_analysis(
        pulayer, "poly_neighbors", "id")
    # Rename the tool's src_/nbr_/LENGTH output fields to the column names
    # Marxan expects in bound.dat.
    arcpy.AlterField_management(poly_neighbors, "src_id", "id1")
    arcpy.AlterField_management(poly_neighbors, "nbr_id", "id2")
    arcpy.AlterField_management(poly_neighbors, "LENGTH", "boundary")
    bound_dat = os.path.join(MarxanDB, "input", "bound.dat")
    fields = ["id1", "id2", "boundary"]
    # NOTE(review): "a+" appends, so re-running this tool duplicates the
    # header and data rows in bound.dat -- confirm whether "w" was intended.
    with open(bound_dat, "a+") as f:
        f.write('\t'.join(fields) + '\n')
        with arcpy.da.SearchCursor(poly_neighbors, fields) as cursor:
            for row in cursor:
                f.write('\t'.join([str(r) for r in row]) + '\n')
    # Fixed: removed the redundant f.close() inside the with-block and the
    # trailing bare "return".
def getNeighbors(shpPath, tablePath):
    """Stamp each feature's OID into its "Id" field, then write a polygon
    neighbor table for shpPath to tablePath (both sides reported, no
    area-overlap output, 0.001 cluster tolerance) and print tool messages."""
    # Make "Id" a stable copy of the object ID so the neighbor table keys on it.
    with arcpy.da.UpdateCursor(shpPath, ["OID@", "Id"]) as update_rows:
        for record in update_rows:
            record[1] = record[0]
            update_rows.updateRow(record)
    arcpy.PolygonNeighbors_analysis(shpPath, tablePath, "Id",
                                    "NO_AREA_OVERLAP", "BOTH_SIDES", 0.001)
    print(arcpy.GetMessages())
def findNeighbors(in_feature, out_table, in_fields):
    """Run Polygon Neighbors on in_feature and write the result to out_table.

    Reports area overlaps, both sides of each relationship, boundary lengths
    in feet and overlap areas in acres.
    """
    print("findNeighbors().............")
    arcpy.PolygonNeighbors_analysis(
        in_features=in_feature,
        out_table=out_table,
        in_fields=in_fields,
        area_overlap='AREA_OVERLAP',
        both_sides='BOTH_SIDES',
        out_linear_units='FEET',
        out_area_units='ACRES')
def collect_extents(lasd, base_dir, row, col):
    """Tile the extent of a LAS dataset into a row x col fishnet and return a
    dict of {cell id: [XMin, YMin, XMax, YMax]} task extents.

    Cells that have 8 neighbors (i.e. interior cells of the grid) get an
    underscore appended to their id string so callers can tell interior from
    edge cells.
    """
    print('Collecting Task Extents')
    fishnet_path = os.path.join(base_dir, 'FISHNET')
    # Fails if FISHNET already exists (os.mkdir raises) -- intentional?
    os.mkdir(fishnet_path)
    # Build Fishnet From Input LASD
    desc = arcpy.Describe(lasd)
    extent = desc.extent
    # Origin is the lower-left corner; the y-axis point sets grid orientation.
    origin_point = str(extent.XMin) + ' ' + str(extent.YMin)
    y_axis = str(extent.XMin) + ' ' + str(extent.YMax)
    grid = arcpy.CreateFishnet_management(
        os.path.join(fishnet_path, 'fishnet.shp'), origin_point, y_axis,
        '0', '0', row, col, '#', 'False', lasd, 'POLYGON')
    # Buffer Fishnet To Ensure Grid Cells Overlap
    fish_buff = os.path.join(fishnet_path, 'fishnet_buff.shp')
    buff_grid = arcpy.Buffer_analysis(grid, fish_buff, '5 Meters', 'FULL',
                                      'FLAT', 'NONE', '#', 'PLANAR')
    # Polygon Neighbors (note: 'neigbor_table' filename typo is preserved
    # because later code never references it by name)
    neighbor_table = os.path.join(fishnet_path, 'neigbor_table.dbf')
    neighbor_poly = arcpy.PolygonNeighbors_analysis(fish_buff, neighbor_table,
                                                    'FID')
    # Summarize Table: neighbor count per source cell
    summary_name = os.path.join(fishnet_path, 'summary.dbf')
    summary_table = arcpy.Statistics_analysis(neighbor_poly, summary_name,
                                              [['src_FID', 'COUNT']],
                                              'src_FID')
    # Join Summary To Fishnet Buffer
    arcpy.JoinField_management(buff_grid, 'FID', summary_table, 'src_FID')
    # Populate Dictionary With Grid Cell Extents
    # Append Underscore To Inner Grid Cells (if row[1] == 8)
    ext_dict = {}
    for r in arcpy.da.SearchCursor(buff_grid, ['FID', 'COUNT_src_', 'SHAPE@']):
        ext = r[2].extent
        box = [ext.XMin, ext.YMin, ext.XMax, ext.YMax]
        # NOTE(review): local "id" shadows the builtin; rename on next touch.
        if r[1] == 8:
            # 8 neighbors => interior cell; mark id with a trailing underscore.
            id = ''.join((str(r[0]), '_'))
        else:
            id = str(r[0])
        ext_dict[id] = box
    print('Tasks: ', len(ext_dict))
    return ext_dict
def check_geo_overlap(polygon_fc, unique_id, table_workspace='in_memory',
                      acceptable_overlap=0):
    """Report polygon pairs in polygon_fc whose overlap area exceeds
    acceptable_overlap.

    polygon_fc: path to a polygon feature class.
    unique_id: field that uniquely identifies each feature (verify with
        check_unique_id first if in doubt).
    table_workspace: workspace for the neighbor table (default "in_memory").
    acceptable_overlap: overlap area, in map units, below which a pair is
        ignored.

    Returns True if any pair exceeded the threshold (and prints the pairs),
    otherwise False.
    """
    base_name = os.path.splitext(os.path.basename(polygon_fc))[0]
    neighbor_table = os.path.join(table_workspace, base_name + '_neighbortable')
    # AREA_OVERLAP writes an AREA column; NO_BOTH_SIDES reports each pair once.
    arcpy.PolygonNeighbors_analysis(polygon_fc, neighbor_table, unique_id,
                                    'AREA_OVERLAP', 'NO_BOTH_SIDES')
    fields = ['src_' + unique_id, 'nbr_' + unique_id, 'AREA']
    print_rows = [fields[0].ljust(80) + fields[1].ljust(80) + fields[2]]
    count = 0
    with arcpy.da.SearchCursor(neighbor_table, fields) as cursor:
        for src_value, nbr_value, overlap_area in cursor:
            if overlap_area > acceptable_overlap:
                count += 1
                print_rows.append(src_value.ljust(80) + nbr_value.ljust(80)
                                  + str(round(overlap_area, 1)))
    if count > 0:
        print("WARNING: The following areas overlap in feature class %s."
              % polygon_fc)
        for line in print_rows:
            print(line)
        return True
    print("Success! There were no areas of overlap in feature class %s."
          % polygon_fc)
    return False
def polygonCalculation(self):
    """Dissolve self.dataSource by "ClassNumber" and return the list of
    homogeneous-class polygon areas (POLY_AREA, square meters).

    Side effects: writes a neighbor table to self.filename, creates
    ROBOCZY\\agregacja2.shp, and deletes the "ClassNumber" field from
    self.dataSource. The result is also stored on self.AreaHomogeneousClass.
    """
    # Neighbor table carrying the class and area of each classified polygon.
    arcpy.PolygonNeighbors_analysis(
        self.dataSource, self.filename,
        [self.fieldName, "ClassNumber", "Shape_Area"],
        "NO_AREA_OVERLAP", "NO_BOTH_SIDES")
    Disolve = "ROBOCZY\\agregacja2.shp"
    arcpy.Dissolve_management(self.dataSource, Disolve, "ClassNumber",
                              "ClassNumber COUNT", "SINGLE_PART",
                              "UNSPLIT_LINES")
    arcpy.AddGeometryAttributes_management(Disolve, "AREA", "",
                                           "SQUARE_METERS", "")
    arcpy.DeleteField_management(self.dataSource, "ClassNumber")
    # Collect the dissolved polygon areas.
    # Fixed: the original appended in a loop and had an unreachable
    # "del row" after the return statement.
    with arcpy.da.SearchCursor(Disolve, ["POLY_AREA"]) as rows:
        self.AreaHomogeneousClass = [area for (area,) in rows]
    return self.AreaHomogeneousClass
def colorPolygons(in_feature_class, feature_field, out_feature_class,
                  needs_dissolve=False):
    """Partition overlapping polygons into non-overlapping groups via greedy
    graph coloring and write the group number to an OVERLAP_GROUP field on
    out_feature_class.

    Builds an overlap graph with Polygon Neighbors (AREA_OVERLAP), colors it
    so no two overlapping features share a color, then joins the color back
    to a copy of in_feature_class. Exits via sys.exit(1) when the tool warns
    (WARNING 000117) that no features overlap.
    """
    arcpy.env.overwriteOutput = True
##    # Create temporary directory
##    temp_dir = os.path.join(tempfile.gettempdir(), 'zonal')
##    index = 0
##    while os.path.exists(temp_dir):
##        temp_dir = os.path.join(tempfile.gettempdir(), 'zonal%d' % index)
##        index += 1
##    os.mkdir(temp_dir)
    # Initialize variables
    if needs_dissolve:
        temp_features = 'in_memory/dissolve'
        bldissolved = False
        # Dissolve on non-ObjectID field
        desc = arcpy.Describe(in_feature_class)
        arcpy.AddMessage("Dissolving features.")
        if hasattr(desc, "OIDFieldName"):
            if feature_field != desc.OIDFieldName:
                arcpy.Dissolve_management(in_feature_class, temp_features, \
                    feature_field)
                bldissolved = True
            else:
                temp_features = in_feature_class
        else:
            arcpy.Dissolve_management(in_feature_class, temp_features, \
                feature_field)
            bldissolved = True
        # Get ObjectID field from dissolved
        if bldissolved:
            desc = arcpy.Describe(temp_features)
            oid_field = desc.OIDFieldName
        else:
            oid_field = feature_field
    else:
        temp_features = in_feature_class
        oid_field = feature_field
    # Calculate polygon contiguity
    arcpy.AddMessage("Identifying overlapping polygons...")
    arcpy.env.outputMFlag = "Disabled"
    result = arcpy.PolygonNeighbors_analysis(temp_features,
                                             'in_memory/neighbors', oid_field,
                                             "AREA_OVERLAP", "BOTH_SIDES")
    # Severity-1 messages carry tool warnings; 000117 means "no neighbors".
    if 'WARNING 000117:' in result.getMessages(1):
        arcpy.AddError("Input feature zone data: {} does not contain "
                       "overlapping features.".format(temp_features))
        sys.exit(1)
    arcpy.AddMessage("Identified overlapping polygons.")
    arcpy.AddMessage("Calculating feature subsets without overlaps...")
    # Retrieve as array with columns src_FID and nbr_FID
    arr = arcpy.da.TableToNumPyArray('in_memory/neighbors',
                                     ['src_%s' % oid_field,
                                      'nbr_%s' % oid_field])
    arr = numpy.array(arr.tolist())

    # Retrieves the colors of the neighboring nodes
    def get_colors(nodes, neighbors):
        colors = set()
        for neighbor in neighbors:
            colors.add(nodes[neighbor][0])
        # NOTE(review): the result of this call is discarded -- set.difference
        # returns a new set and does not mutate in place. If the intent was to
        # drop the "uncolored" marker 0, this line has no effect; confirm.
        colors.difference([0])
        return colors

    # Create a new color
    def get_new_color(colors):
        return max(colors)+1 if len(colors) > 0 else 1

    # Chooses from existing colors randomly
    def choose_color(colors):
        return random.choice(list(colors))

    # Sort source FIDs in descending order by number of neighbors
    arr_uniq = numpy.unique(arr[:,0])
    arr_count = numpy.zeros_like(arr_uniq)
    for index in range(arr_uniq.size):
        arr_count[index] = numpy.count_nonzero(arr[:, 0] == arr_uniq[index])
    arr_ind = numpy.argsort(arr_count)[::-1]
    # Initialize node dictionary --
    # where key := FID of feature (integer)
    # where value[0] := color of feature (integer)
    # where value[1] := FIDs of neighboring features (set)
    nodes = collections.OrderedDict()
    for item in arr_uniq[arr_ind]:
        nodes[item] = [0, set()]
    # Populate neighbors
    for index in range(arr.shape[0]):
        nodes[arr[index, 0]][1].add(arr[index, 1])
    # Color nodes -- greedy: most-connected nodes are colored first.
    colors = set()
    for node in nodes:
        # Get colors of neighboring nodes
        nbr_colors = get_colors(nodes, nodes[node][1])
        # Search for a color not among those colors
        choices = colors.difference(nbr_colors)
        # Assign the node that color or create it when necessary
        if len(choices) == 0:
            new_color = get_new_color(colors)
            colors.add(new_color)
            nodes[node][0] = new_color
        else:
            nodes[node][0] = choose_color(choices)
    # Classify nodes by colors --
    classes = {}
    for node in nodes:
        color = nodes[node][0]
        if color in classes:
            classes[color].add(node)
        else:
            classes[color] = set([node])
    # Get set of all FIDs
    all_fids = set()
    with arcpy.da.SearchCursor(temp_features, oid_field) as cursor:
        for row in cursor:
            all_fids.add(row[0])
    # Add disjoint FIDs (features with no overlaps) to new class if necessary
    disjoint_fids = all_fids.difference(set(nodes.keys()))
    if len(disjoint_fids) > 0:
        new_color = get_new_color(colors)
        classes[new_color] = disjoint_fids
    # Calculate number of classes
    num_classes = len(classes)
    # Save the classes as a new field--modified by Nicole Smith
    arcpy.AddField_management(temp_features, "OVERLAP_GROUP", "SHORT")
    for index, cl in enumerate(classes):
##        arcpy.SetProgressorLabel(
##            "Processing layer %d of %d..." % (index+1, num_classes))
        test = tuple(map(int, classes[cl]))
        where_clause = '\"%s\" IN %s' % (oid_field, \
            test)
        feature_lyr = arcpy.MakeFeatureLayer_management(temp_features)
        arcpy.SelectLayerByAttribute_management(feature_lyr, "NEW_SELECTION",
                                                where_clause)
        arcpy.CalculateField_management(feature_lyr, "OVERLAP_GROUP", cl,
                                        "PYTHON")
        arcpy.SelectLayerByAttribute_management(feature_lyr, "CLEAR_SELECTION")
    # Copy the input and carry the group assignment over via join.
    arcpy.CopyFeatures_management(in_feature_class, out_feature_class)
    arcpy.JoinField_management(out_feature_class, feature_field, temp_features,
                               feature_field, "OVERLAP_GROUP")
arcpy.SetProgressorPosition() #get count of unassigned ZCTAS null_Count = 0 with arcpy.da.SearchCursor(ZCTAs,"Assigned_To","Assigned_To IS NULL") as cursor: for row in cursor: null_Count +=1 arcpy.AddMessage("{0} ZCTAs remain to be assigned".format(null_Count)) ################################################################################################### #Generate a neighbor table and find all instances of seed neighbors where the provider ZCTA is the #seed ZCTA and the dyad_Max in the dyad table is 1. ################################################################################################### #generate neighbor table of ZCTAs arcpy.AddMessage("Generating Neighbor Table for all features....") NeighborTable = arcpy.PolygonNeighbors_analysis(ZCTAs,"Temp_NBR_Table", ZCTA_field,"NO_AREA_OVERLAP","BOTH_SIDES","#","METERS","SQUARE_METERS") NBRTable_FieldList = [f.name for f in arcpy.ListFields(NeighborTable,"*")] #create neighbor table field list nbrZCTA_Field = [f for f in NBRTable_FieldList if 'nbr' in f][0] #find nbr_ZCTA field within field list srcZCTA_Field = [f for f in NBRTable_FieldList if 'src' in f][0] #find nbr_ZCTA field within field list arcpy.SetProgressor("step","finding seed neighbors where dyad max = 1...",0,len(seed_List),1) for i in seed_List: currentSeed = i # set current seed equal to i seedQuery = nbrZCTA_Field + " = '" + currentSeed + "' AND LENGTH > 0" #seed queary clause shared border length must be greater than 0 dyadQuery = DyadProv_field + " = " + currentSeed #dyad query temp_nbr_List = [] #temp list that will get re declared through each iteration #find all instances of neighbor ZCTAS being equal to current seed and populate list with arcpy.da.SearchCursor(NeighborTable,NBRTable_FieldList,seedQuery) as cursor:
def _load_assignments(self):
    """Connect to the feature class with all tiles and init an assignment
    per tile.

    Calculate neighborhood for all tiles and assign the neighboring tiles
    to the assignment. The neighbourhood table association is stored
    in memory and deleted after the extraction. Diagonal neighbor positions
    are derived from fixed numeric differences between tile numbers taken
    from the settings.
    """
    self.tiling_gdb = settings.get("data.tiling.gdb")
    self.tiling_fc = os.path.join(
        self.tiling_gdb, settings.get("data.tiling.feature_class"))
    self.tiling_field = settings.get("data.tiling.field_name")
    # One Assignment per tile polygon, keyed by the tile-number field.
    polygons = arcpy.da.SearchCursor(self.tiling_fc,
                                     [self.tiling_field, "SHAPE@"])
    for polygon in polygons:
        self.assignments.append(
            assignments.Assignment(polygon[0],
                                   settings.get("algorithm.tile_buffer"),
                                   tiles.Extent(extent=polygon[1].extent)))
    del polygons
    log.info("Found {} tiles in {}".format(len(self.assignments),
                                           self.tiling_fc))
    nbr_table_name = "in_memory\\neighbours"
    arcpy.PolygonNeighbors_analysis(self.tiling_fc, nbr_table_name,
                                    self.tiling_field,
                                    both_sides="BOTH_SIDES")
    log.info(
        "Calculated polygon neighbourhood in {} and wrote result to {}".
        format(self.tiling_fc, nbr_table_name))
    # Tile-number differences that identify diagonal neighbors, plus the
    # expected shared-edge lengths for straight neighbors (from settings).
    top_left_diffs = settings.get("data.tiling.top_left_diffs")
    top_right_diffs = settings.get("data.tiling.top_right_diffs")
    bottom_left_diffs = settings.get("data.tiling.bottom_left_diffs")
    bottom_right_diffs = settings.get("data.tiling.bottom_right_diffs")
    edge_length_north = settings.get("data.tiling.edge_length_north")
    edge_length_east = settings.get("data.tiling.edge_length_east")
    cursor = arcpy.da.SearchCursor(nbr_table_name, [
        "src_{}".format(self.tiling_field), "nbr_{}".format(
            self.tiling_field), "LENGTH"
    ])
    for row in cursor:
        src = row[0]
        nbr = row[1]
        length = row[2]
        assignment = self.get_assignment(src)
        nbr_assignment = self.get_assignment(nbr)
        # Straight neighbors share a full edge: an east-length edge means a
        # left/right neighbor, a north-length edge a top/bottom neighbor;
        # the sign of (nbr - src) picks the side. Diagonal neighbors share
        # only a corner (no matching edge length) and are classified by the
        # configured tile-number differences. Assumes the numbering scheme
        # makes nbr > src mean "right"/"below" -- confirm against settings.
        if length == edge_length_east and nbr > src:
            assignment.set_neighbour(nbr, nbr_assignment.extent,
                                     tiles.Neighborhood.RIGHT)
        elif length == edge_length_east and nbr < src:
            assignment.set_neighbour(nbr, nbr_assignment.extent,
                                     tiles.Neighborhood.LEFT)
        elif length == edge_length_north and nbr > src:
            assignment.set_neighbour(nbr, nbr_assignment.extent,
                                     tiles.Neighborhood.BOTTOM_CENTER)
        elif length == edge_length_north and nbr < src:
            assignment.set_neighbour(nbr, nbr_assignment.extent,
                                     tiles.Neighborhood.TOP_CENTER)
        elif src - nbr in top_left_diffs:
            assignment.set_neighbour(nbr, nbr_assignment.extent,
                                     tiles.Neighborhood.TOP_LEFT)
        elif src - nbr in top_right_diffs:
            assignment.set_neighbour(nbr, nbr_assignment.extent,
                                     tiles.Neighborhood.TOP_RIGHT)
        elif src - nbr in bottom_left_diffs:
            assignment.set_neighbour(nbr, nbr_assignment.extent,
                                     tiles.Neighborhood.BOTTOM_LEFT)
        elif src - nbr in bottom_right_diffs:
            assignment.set_neighbour(nbr, nbr_assignment.extent,
                                     tiles.Neighborhood.BOTTOM_RIGHT)
        else:
            raise AttributeError(
                "Cannot calculate neighborhood of tile with number {}".
                format(src))
    del cursor
    # Drop the in-memory neighbor table now that assignments are wired up.
    arcpy.Delete_management(nbr_table_name)
    log.debug("Deleted in memory feature class {}".format(nbr_table_name))
def main(gdb, version, scale, rc_version, levellist):
    """For each census level: export the feature class to Postgres, build its
    polygon-neighbor table, and load three majority zonal-statistics tables
    (raw NWALT, biomes, reclassed NWALT) into Postgres.

    gdb: path to the source file geodatabase.
    version: schema name and output gdb prefix.
    scale: raster resolution in meters (used in raster/table names).
    rc_version: reclass version tag used in the registered rc table name.
    levellist: census levels (feature class names) to process.
    """
    # Python 2 script (print statement syntax).
    print 'main() function............................'
    for level in levellist:
        print('--------- {} -----------------------'.format(level))
        #### export the raw census feature class into postgres
        convertFCtoPG(gdb=gdb, pgdb='lem', schema=version, table=level,
                      geomtype='MULTIPOLYGON', epsg=102003)
        ##### run the arcgis arcpy.PolygonNeighbors_analysis to get the
        ##### neighbors of each feature in featureclass
        arcpy.PolygonNeighbors_analysis(
            in_features='{0}\\{1}'.format(gdb, level),
            out_table='{0}.gdb\\{1}_neighbors'.format(version, level),
            in_fields="geoid")
        addGDBTable2postgres_io(gdb=version, schema=version,
                                table="{}_neighbors".format(level))
        ####### create majority zonal stats with raw NWALT raster
        #####################################################
        ZonalStatisticsAsTable(
            in_zone_data='{0}\\{1}'.format(gdb, level),
            zone_field="geoid",
            in_value_raster="rasters.gdb\\nwalt_{0}m".format(scale),
            out_table='{0}.gdb\\{2}_zonal_maj_nwalt_{1}m'.format(
                version, scale, level),
            ignore_nodata="DATA",
            statistics_type="MAJORITY")
        addGDBTable2postgres_io(gdb=version, schema=version,
                                table="{0}_zonal_maj_nwalt_{1}m".format(
                                    level, scale))
        ###### create zonal stats with BIOMES raster
        #####################################################
        ZonalStatisticsAsTable(
            in_zone_data='{0}\\{1}'.format(gdb, level),
            zone_field="geoid",
            in_value_raster="rasters.gdb\\biomes_vegtype_conus_exp10_{}m".
            format(scale),
            out_table='{0}.gdb\\{2}_zonal_maj_biomes_{1}m'.format(
                version, scale, level),
            ignore_nodata="DATA",
            statistics_type="MAJORITY")
        addGDBTable2postgres_io(gdb=version, schema=version,
                                table="{0}_zonal_maj_biomes_{1}m".format(
                                    level, scale))
        ####### create majority zonal stats with reclassed NWALT raster
        #####################################################
        ZonalStatisticsAsTable(
            in_zone_data='{0}\\{1}'.format(gdb, level),
            zone_field="geoid",
            in_value_raster="rasters.gdb\\nwalt_{0}m_rc_v2".format(scale),
            out_table='{0}.gdb\\{2}_zonal_maj_nwalt_rc_{1}m'.format(
                version, scale, level),
            ignore_nodata="DATA",
            statistics_type="MAJORITY")
        # NOTE(review): the out_table above is named
        # "<level>_zonal_maj_nwalt_rc_<scale>m" but the table registered in
        # Postgres below includes rc_version
        # ("<level>_zonal_maj_nwalt_rc_<rc_version>_<scale>m") -- these names
        # do not match; confirm which one is correct.
        addGDBTable2postgres_io(gdb=version, schema=version,
                                table="{0}_zonal_maj_nwalt_rc_{1}_{2}m".format(
                                    level, rc_version, scale))
# Python 2 script fragment: build yesterday's LTE Thiessen-polygon neighbor
# table and prepare a SQL Server distance update.
# Source and target tables are date-suffixed with yesterday's date (yestday,
# ArcCatalogPath, GISDBPath, JGYHDbPath are defined elsewhere in the file).
InputTable = ArcCatalogPath + "\\" + GISDBPath + "\\WangYouCellThiess.DBO.GIS_CELL_TSSPOLY_LTE" + yestday.strftime(
    '%Y%m%d')
ZJJ_DETAIL = ArcCatalogPath + "\\" + JGYHDbPath + "\\JGYH.DBO.GIS_CELL_TSSPOLY_LTE_D" + yestday.strftime(
    '%Y%m%d')
# Only proceed when the input table has at least one row.
rows = arcpy.SearchCursor(InputTable)
row = rows.next()
if row:
    print "delete exists ZJJ_DETAIL"
    logging.info("delete exists ZJJ_DETAIL")
    if (arcpy.Exists(ZJJ_DETAIL)):
        arcpy.Delete_management(ZJJ_DETAIL)
    print "PolygonNeighbors_analysis begin"
    logging.info("PolygonNeighbors_analysis begin")
    # Process: Polygon Neighbors -- carries cell name/CI/coordinates for both
    # sides of each neighbor pair into the output table.
    arcpy.PolygonNeighbors_analysis(
        InputTable, ZJJ_DETAIL,
        "OBJECTID;CELL_NAME;CI;LATITUDE;LONGITUDE;SITE_NAME",
        "NO_AREA_OVERLAP", "BOTH_SIDES", "", "METERS", "SQUARE_MILES")
    print "PolygonNeighbors_analysis success"
    logging.info("PolygonNeighbors_analysis success")
    # Process: Add Field -- DISTANCE will hold the src/nbr great-circle
    # distance computed server-side below.
    arcpy.AddField_management(ZJJ_DETAIL, "DISTANCE", "DOUBLE", "12", "2", "",
                              "DISTANCE", "NULLABLE", "NON_REQUIRED", "")
    logging.info("AddField_management success")
    # NOTE(review): database credentials are hard-coded in the connection
    # string; move them to configuration or a secrets store.
    dbConn = pyodbc.connect(
        'DRIVER={SQL Server};SERVER=10.48.186.12;DATABASE=JGYH;UID=sa;PWD=!Passw0rd@'
    )
    cursor = dbConn.cursor()
    # Distance in meters via the dbo.fnGetDistance UDF (result * 1000).
    updateThiessZjjSql = " update GIS_CELL_TSSPOLY_LTE_D" + yestday.strftime(
        '%Y%m%d'
    ) + " set DISTANCE =(dbo.fnGetDistance(src_LATITUDE,src_LONGITUDE,nbr_LATITUDE,nbr_LONGITUDE))*1000"
# -*- coding: utf-8 -*- # --------------------------------------------------------------------------- # find_neighbors.py # Created on: 2020-02-24 09:31:41.00000 # (generated by ArcGIS/ModelBuilder) # Description: # --------------------------------------------------------------------------- # Import arcpy module import arcpy wd = "D:/florida_hurricane/" # Local variables: fl_2016_shp = "D:/florida_hurricane/raw_data/shapefiles/fl_2016/fl_2016.shp" fl_2016_PolygonNeighbors1 = "C:/Users/morrisk/Documents/ArcGIS/Default.gdb/fl_2016_PolygonNeighbors1" neighbors_txt = fl_2016_PolygonNeighbors1 temp = "D:/florida_hurricane/temp" # Process: Polygon Neighbors arcpy.PolygonNeighbors_analysis(fl_2016_shp, fl_2016_PolygonNeighbors1, "pct;county", "NO_AREA_OVERLAP", "BOTH_SIDES", "", "FEET", "SQUARE_FEET") # Process: Table to Table arcpy.TableToTable_conversion(fl_2016_PolygonNeighbors1, temp, "neighbors.txt", "", "src_pct \"src_pct\" true true false 50 Text 0 0 ,First,#,C:/Users/morrisk/Documents/ArcGIS/Default.gdb/fl_2016_PolygonNeighbors1,src_pct,-1,-1;nbr_pct \"nbr_pct\" true true false 50 Text 0 0 ,First,#,C:/Users/morrisk/Documents/ArcGIS/Default.gdb/fl_2016_PolygonNeighbors1,nbr_pct,-1,-1;src_county \"src_county\" true true false 50 Text 0 0 ,First,#,C:/Users/morrisk/Documents/ArcGIS/Default.gdb/fl_2016_PolygonNeighbors1,src_county,-1,-1;nbr_county \"nbr_county\" true true false 50 Text 0 0 ,First,#,C:/Users/morrisk/Documents/ArcGIS/Default.gdb/fl_2016_PolygonNeighbors1,nbr_county,-1,-1;LENGTH \"LENGTH\" true true false 8 Double 0 0 ,First,#,C:/Users/morrisk/Documents/ArcGIS/Default.gdb/fl_2016_PolygonNeighbors1,LENGTH,-1,-1;NODE_COUNT \"NODE_COUNT\" true true false 4 Long 0 0 ,First,#,C:/Users/morrisk/Documents/ArcGIS/Default.gdb/fl_2016_PolygonNeighbors1,NODE_COUNT,-1,-1", "")
AddFieldIfNotexists(t_3_paths_summary, "n5_score", "Short") expression = "score( !n5_3_paths_average_betweenness! )" codeblock = """def score(value): limit = 1366759 if value > limit: return 3 else: return 1""" arcpy.CalculateField_management(t_3_paths_summary, "n5_score", expression, "PYTHON_9.3", codeblock) # ######################### # # SCORE 3 - Neighbour count # # ######################### # t_3_polygons_neighbors = "temp_3_polygon_neighbors" arcpy.PolygonNeighbors_analysis(v_m98_areas, t_3_polygons_neighbors, in_fields="JOIN_ID", area_overlap="AREA_OVERLAP", both_sides="BOTH_SIDES", cluster_tolerance="10 Meters") # Summarize by recreational area t_3_polygons_summary = "temp_3_polygon_summary" arcpy.Statistics_analysis(t_3_polygons_neighbors, t_3_polygons_summary, [["src_JOIN_ID","COUNT"]], "src_JOIN_ID") arcpy.Delete_management(t_3_polygons_neighbors) arcpy.AlterField_management(t_3_polygons_summary, "src_JOIN_ID", 'JOIN_ID', 'JOIN_ID') # Give score 3 to areas where COUNT is > limit AddFieldIfNotexists(t_3_polygons_summary, "n5_score", "Short") expression = "score( !COUNT_src_JOIN_ID! )" codeblock = """def score(value): limit = 4 if value > limit: return 3 else:
shapefile = raw_input('Enter the name of the shapefile: ') else: shutil.copy(currDir + "\\GenerateShapefile.py", workdir + "\\GenerateShapefile.py") os.chdir(workdir) execfile("GenerateShapefile.py") shapefile = "test.shp" os.chdir(currDir) # make shapefile into full path shapefile = os.path.join(workdir, shapefile) # generate Neighbors table if not os.path.isfile(os.path.join(workdir, "neighborTable.dbf")): arcpy.PolygonNeighbors_analysis(shapefile, os.path.join(workdir, "neighborTable.dbf"), ["FID", "Name"], "AREA_OVERLAP", "BOTH_SIDES", "1 FOOT", "", "") with arcpy.da.SearchCursor(os.path.join(workdir, "neighborTable.dbf"), ['src_FID', 'nbr_Name', 'src_Name']) as cursor: for row in cursor: fid = str(row[0]) zip_name = ZipFile(newpath + '\\tile_' + fid + ".zip", 'a', allowZip64=True) # put the neighbor laz file in the zipfile zip_name.write(os.path.join(workdir, row[1]), row[1]) zip_name.close() del cursor # for each polygon in original shapefile
def percent_geographic_doubles(polygon_fc, unique_id,
                               percent_overlap_allowed=10, keep_fc='',
                               keep_field=''):
    """Find near-duplicate polygons (pairs whose overlap area is at least
    (100 - percent_overlap_allowed)% of the feature area) and report which
    member of each pair has the older FDate.

    This version only prints diagnostic messages via cu.multi_msg; the
    actual deletion logic is not implemented (see the "write this
    condition" message below).

    polygon_fc: polygon feature class to check.
    unique_id: field uniquely identifying features.
    percent_overlap_allowed: overlap percentage below which a pair is NOT
        considered a duplicate.
    keep_fc / keep_field: optional feature class + field listing ids that
        must be preserved.
    """
    neighbor_table = 'in_memory/neighbortable'
    cu.multi_msg('Calculating neighbor table...')
    arcpy.PolygonNeighbors_analysis(polygon_fc, neighbor_table, unique_id,
                                    'AREA_OVERLAP', 'NO_BOTH_SIDES')
    # need these to avoid some naming problems that arise from trying to
    # do this directly
    src_field = arcpy.ListFields(neighbor_table, 'src*')[0].name
    nbr_field = arcpy.ListFields(neighbor_table, 'nbr*')[0].name
    arcpy.CopyFeatures_management(polygon_fc, 'in_memory/fc')
    fdate_field = arcpy.ListFields('in_memory/fc', '*FDate*')[0].name
    cu.multi_msg('Joining neighbor table to feature class...')
    arcpy.JoinField_management('in_memory/fc', unique_id, neighbor_table,
                               src_field)
    # AREA comes from the joined neighbor table (overlap area);
    # SHAPE@AREA is the feature's own area.
    cursor_fields = ['AREA', 'SHAPE@AREA', unique_id, nbr_field, fdate_field]
##    print([f.name for f in arcpy.ListFields('in_memory/fc')])
    if keep_fc:
        keep_ids = [
            row[0] for row in arcpy.da.SearchCursor(keep_fc, keep_field)
        ]
    else:
        keep_ids = []
    with arcpy.da.SearchCursor('in_memory/fc', cursor_fields) as cursor:
        for row in cursor:
            # If this row represents a duplicate-type overlap
            # (overlap area >= tolerated fraction of the feature area)
            if row[0] >= row[1] * (100 - percent_overlap_allowed) / 100:
                src_value = row[2]
                nbr_value = row[3]
                cu.multi_msg("testing. pair: %s and %s" %
                             (src_value, nbr_value))
                # Then lookup both rows in the overlap and delete the one with
                # the oldest FDate
                where_clause = '''"%s" = '%s' OR "%s" = '%s' ''' % (
                    unique_id, src_value, unique_id, nbr_value)
                dates = [
                    row[4] for row in arcpy.da.SearchCursor(
                        'in_memory/fc', cursor_fields, where_clause)
                ]
                cu.multi_msg("testing. dates %s and %s not in order" %
                             (dates[0], dates[1]))
                with arcpy.da.UpdateCursor('in_memory/fc', cursor_fields,
                                           where_clause) as c:
                    for r in c:
                        if r[4] == min(dates) and r[4] == max(dates):
                            cu.multi_msg(
                                "PROBLEM! Same date. Resolve pair %s, %s manually."
                                % (src_value, nbr_value))
                        if r[4] == min(dates) and r[4] != max(dates):
                            # NOTE(review): r[4] is the FDate value but
                            # keep_ids holds unique_id values -- comparing a
                            # date against an id list looks wrong; should
                            # these tests use r[2]? Confirm.
                            if r[4] not in keep_ids:
                                cu.multi_msg(
                                    "%s has the older date (%s) and would be removed."
                                    % (r[2], r[4]))
                            if r[4] in keep_ids:
                                cu.multi_msg(
                                    "You're going to have to write this condition..."
                                )
                            else:
                                continue
                        else:
                            continue
    for item in [neighbor_table, 'in_memory/fc']:
        arcpy.Delete_management(item)
def topology_repair(
        inFile=path_to_shapefile,
        dissolve_field="",
        gap_threshold=10000):
    """Repair overlap and gap topology errors in a polygon shapefile.

    Exports overlap and small-gap error polygons from two topology passes,
    assigns each error polygon the attribute value of the neighbor it shares
    the longest border with, then erases/merges/dissolves everything back
    into 'dissolved_neighbors' inside a new file geodatabase next to inFile.

    inFile: input shapefile path (default comes from the module-level
        path_to_shapefile).
    dissolve_field: attribute field used to type and dissolve polygons.
    gap_threshold: max area of gaps that are considered errors (map units).

    NOTE(review): the pandas groupby below hard-codes 'nbr_TYPE', which only
    exists when dissolve_field == 'TYPE' -- confirm before calling with
    another field name.
    """
    # threshold is max area of gaps that are considered to be errors
    # create variables for necessary paths, create gdb, import inFile into
    # feature dataset
    gdb = os.path.basename(inFile[:-3] + 'gdb')
    gdbDir= os.path.dirname(inFile)
    arcpy.CreateFileGDB_management(gdbDir, gdb)
    arcpy.env.workspace = gdbDir + '/' + gdb
    feature_ds = arcpy.env.workspace + '/topology_ds'
    data = arcpy.env.workspace + '/topology_ds/' + os.path.basename(inFile[:-4])
    topology = feature_ds + '/Topology'
    # Feature dataset takes its spatial reference from the input's .prj file.
    arcpy.CreateFeatureDataset_management(arcpy.env.workspace, "topology_ds",
                                          inFile[:-3] + 'prj')
    arcpy.FeatureClassToGeodatabase_conversion([inFile], "topology_ds")
    # Create topology, add feature class, define rules (pass 1: overlaps)
    arcpy.CreateTopology_management(feature_ds, "Topology")
    arcpy.AddFeatureClassToTopology_management(topology, data)
    arcpy.AddRuleToTopology_management(topology, "Must Not Overlap (Area)",
                                       data,"","","")
    arcpy.ValidateTopology_management(topology)
    # create polygon inFile from errors and delete
    arcpy.ExportTopologyErrors_management(topology, "", "overlapErrors")
    # Tag overlap error polygons with "o" in the dissolve field.
    arcpy.AddField_management("overlapErrors_poly", dissolve_field, "STRING")
    o = "o"
    arcpy.CalculateField_management('overlapErrors_poly', dissolve_field,o)
    # Create topology, add feature class, define rules (pass 2: gaps)
    arcpy.CreateTopology_management(feature_ds, "Topology")
    arcpy.AddFeatureClassToTopology_management(topology, data)
    arcpy.AddRuleToTopology_management(topology, "Must Not Have Gaps (Area)",
                                       data,"","","")
    arcpy.ValidateTopology_management(topology)
    # create polygon inFile from errors and merge with original data
    arcpy.ExportTopologyErrors_management(topology, "", "gapErrors")
    arcpy.FeatureToPolygon_management("gapErrors_line","topo_errors_gaps")
    # Keep only gaps smaller than the threshold; larger ones are real holes.
    arcpy.SelectLayerByAttribute_management ("topo_errors_gaps",
                                             "NEW_SELECTION",
                                             '"Shape_Area" < '
                                             + str(gap_threshold))
    # Tag gap error polygons with "g" in the dissolve field.
    arcpy.AddField_management("topo_errors_gaps", dissolve_field, "STRING")
    g = "g"
    arcpy.CalculateField_management('topo_errors_gaps', dissolve_field,g )
    arcpy.SelectLayerByAttribute_management ("topo_errors_gaps",
                                             "SWITCH_SELECTION")
    arcpy.DeleteRows_management("topo_errors_gaps")
    arcpy.Merge_management(["overlapErrors_poly", "topo_errors_gaps" ,inFile],
                           "topomerged")
    # Get neighbor table and export to gdb
    arcpy.PolygonNeighbors_analysis('topomerged', 'topo_errors',
                                    ['OBJECTID', dissolve_field])
    # doesn't always find neighbors on all sides of polygon
    arcpy.TableToGeodatabase_conversion('topo_errors',arcpy.env.workspace)
    #table to array and array to dataframe
    nbr_field = 'nbr_' + dissolve_field
    arr = arcpy.da.FeatureClassToNumPyArray(("topo_errors"),
                                            ("src_OBJECTID", nbr_field,
                                             "LENGTH"))
    index = [str(i) for i in range(1, len(arr)+1)]
    df = pd.DataFrame(arr, index=index)
    df = df.groupby(['src_OBJECTID','nbr_TYPE'],as_index = False)['LENGTH'].sum() #sum in case several sides of polygon have same neighbor
    #select rows from df and export to csv and to gdb
    # For each error polygon keep the neighbor type with the longest shared
    # border.
    idx = df.groupby(['src_OBJECTID'])['LENGTH'].transform(max) == df['LENGTH']
    df_select = df [idx]
    df_select.to_csv(gdbDir+'/joinme.csv', index=False)
    arcpy.TableToTable_conversion(gdbDir+'/joinme.csv', arcpy.env.workspace,
                                  "joinme")
    # Merge error polygons, join field, delete overlaps from infile, assign
    # type to error polygons, merge all and dissolve
    arcpy.JoinField_management('topomerged', 'OBJECTID', 'joinme',
                               'src_OBJECTID', 'nbr_TYPE')
    arcpy.FeatureClassToFeatureClass_conversion('topomerged', "",
                                                'topo_errors_join')
    arcpy.SelectLayerByAttribute_management("topo_errors_join",
                                            "NEW_SELECTION", "TYPE = 'o'")
    arcpy.SelectLayerByAttribute_management("topo_errors_join",
                                            "ADD_TO_SELECTION", "TYPE = 'g'")
    arcpy.SelectLayerByAttribute_management ("topo_errors_join",
                                             "SWITCH_SELECTION")
    arcpy.DeleteRows_management("topo_errors_join") #leave only error polygons
    # Swap field names so the neighbor-derived type becomes the TYPE field.
    arcpy.AlterField_management('topo_errors_join', 'TYPE', 'orig_TYPE',
                                'orig_TYPE')
    arcpy.AlterField_management('topo_errors_join', 'nbr_TYPE',
                                'TYPE','TYPE')
    arcpy.Erase_analysis(inFile,'overlapErrors_poly','infile_overlaps_erased')
    arcpy.Merge_management(["topo_errors_join","infile_overlaps_erased"],
                           "merged_neighbors")
    arcpy.Dissolve_management('merged_neighbors', 'dissolved_neighbors',
                              'TYPE')
# Script fragment: find adjacent map-unit polygons that share the same
# MapUnit value (i.e. boundaries that should probably not exist).
# gdb and MapUnitPolys are defined elsewhere in the file.
Polygon_Neighbors = "{}/polytest".format(gdb)
PolygonNeighbor_TableSelect = "{}/PolygonNeighbor_TableSelect".format(gdb)
# NOTE(review): .format(gdb) on a string with no {} placeholder is a no-op,
# and inFeatures_lyr / dropFields are never used in this fragment -- confirm
# whether later code consumes them or they are dead.
inFeatures_lyr = "Polygon_Neighbors_tv".format(gdb)
dropFields = [
    "MapUnitPolys_IdentityConfidence",
    "PolygonNeighbor_TableSelect_NODE_COUNT", "MapUnitPolys_Label",
    "MapUnitPolys_Notes", "MapUnitPolys_Symbol",
    "PolygonNeighbor_TableSelect_OBJECTID"
]
# Process: Polygon Neighbors -- carries OBJECTID and MapUnit for both sides
# of each boundary.
arcpy.PolygonNeighbors_analysis(MapUnitPolys, Polygon_Neighbors,
                                "OBJECTID;MapUnit", "NO_AREA_OVERLAP",
                                "BOTH_SIDES", "", "METERS", "SQUARE_METERS")
Polygon_Neighbors_tv = arcpy.MakeTableView_management(
    Polygon_Neighbors, "Polygon_Neighbors_tv")[0]
# Process: Select Layer By Attribute -- neighbors with identical MapUnit.
arcpy.SelectLayerByAttribute_management(Polygon_Neighbors_tv, "NEW_SELECTION",
                                        "src_MapUnit = nbr_MapUnit")
# Process: Table Select -- persist the same-unit pairs to their own table.
arcpy.TableSelect_analysis(Polygon_Neighbors, PolygonNeighbor_TableSelect,
                           "src_MapUnit = nbr_MapUnit")
arcpy.GetCount_management(PolygonNeighbor_TableSelect)
arcpy.AddMessage(arcpy.GetMessages())
def createPolyNeiTable(inputFC, outputTable, field):
    """Write a polygon-neighbor table for inputFC keyed on the given field,
    overwriting outputTable if it already exists."""
    ARCPY.env.overwriteOutput = True
    ARCPY.PolygonNeighbors_analysis(in_features=inputFC,
                                    out_table=outputTable,
                                    in_fields=field)
for field in fieldMappings.fields: if field.name not in fields_to_keep: fieldMappings.removeFieldMap( fieldMappings.findFieldMapIndex(field.name)) # Merge original fire poly with reburn poly; cannot calculate shared perimeter if they're in separate feature classes #merged_polys = os.path.join(ws, "merged_polys" + str(n_poly) + ".shp") #"in_memory/merged_polys" arcpy.Merge_management( ["in_memory/older_fc", "in_memory/reburn_fc"], "in_memory/merged_polys", fieldMappings) # Get length of shared boundary between original fire and reburn area shared_table = os.path.join(ws, "table" + str(n_poly) + ".dbf") arcpy.PolygonNeighbors_analysis( "in_memory/merged_polys", shared_table, [ 'FireName', 'FireName_1', 'acres', 'acres_1', 'parentid', 'parentid_1' ], "NO_AREA_OVERLAP", "NO_BOTH_SIDES", "#", "METERS") # Add boundary length table to list tbl_list.append(os.path.join(ws, shared_table)) # clean up print 'Deleting memory...' arcpy.Delete_management('in_memory') except: print ' ** Could not save polygon ' + str(poly_id) ts1 = time.time() print 'Time elapsed is: ' + str(ts1 - ts0) print arcpy.GetMessages()
# Script fragment: extract sliver polygons left uncovered by the merged
# layer 'm', tag them, and find which slivers of type 'fao' border 'land'.
# 'm', 'box', and sr_gcs are created earlier in the file.
arcpy.DefineProjection_management('box', sr_gcs)
arcpy.Clip_analysis('m', 'box', 'm_c')
# Everything inside the box not covered by m_c becomes candidate slivers.
arcpy.Erase_analysis('box','m_c', 'm_other')
arcpy.MultipartToSinglepart_management('m_other', 'slivers')
# Attach empty raw_* attribute columns to the slivers, with raw_type set
# to 'sliver' so they are distinguishable after the merge below.
r = arcpy.da.TableToNumPyArray('slivers', ['OBJECTID'])
f = np.zeros((len(r),), dtype=[('raw_type','<U20'),('raw_id','<i4'),('raw_name','<U255'),('raw_key','<U10')]);
f['raw_type'].fill('sliver')
rf = np.lib.recfunctions.merge_arrays([r, f], flatten=True)
arcpy.da.ExtendTable('slivers', 'OBJECTID', rf, 'OBJECTID', append_only=False)
# merge slivers
print('merge slivers (%s)' % time.strftime('%H:%M:%S'))
arcpy.Merge_management(['m_c','slivers'],'m_c_s')
# neighbor analysis: SLOW (~ 5 min)
print('neighbor analysis (%s)' % time.strftime('%H:%M:%S'))
arcpy.PolygonNeighbors_analysis('m_c_s', 'nbrs_m_c_s',
                                ['OBJECTID','raw_type','raw_id','raw_name','raw_key'],
                                'NO_AREA_OVERLAP')
# get merged data, add empty spatial sp_* fields and use PANDAS data frame
print('get merged data, add empty spatial sp_* fields and use PANDAS data frame (%s)' % time.strftime('%H:%M:%S'))
m = arcpy.da.TableToNumPyArray('m_c_s', ['OBJECTID','raw_type','raw_id','raw_name','raw_key','Shape_Area'])
f = np.zeros((len(m),), dtype=[('sp_type','<U20'),('sp_id','<i4'),('sp_name','<U255'),('sp_key','<U10')])
m = pandas.DataFrame(np.lib.recfunctions.merge_arrays([m, f], flatten=True), index=m['OBJECTID'])
# fao bordering land: presume land gap filled by fao if small
print('fao bordering land: presume land gap filled by fao (%s)' % time.strftime('%H:%M:%S'))
n = pandas.DataFrame(arcpy.da.TableToNumPyArray(
    'nbrs_m_c_s', ['src_OBJECTID','src_raw_type','src_raw_id','src_raw_name','src_raw_key',
                   'nbr_OBJECTID','nbr_raw_type','nbr_raw_id','nbr_raw_name','nbr_raw_key','LENGTH'],
    "src_raw_type = 'fao' AND nbr_raw_type = 'land' AND LENGTH > 0"))
# For each fao polygon keep the land neighbor with the longest shared border.
d = n.groupby(['src_OBJECTID']).agg(lambda df: df.iloc[df['LENGTH'].values.argmax()])
"NULLABLE", "NON_REQUIRED", "") arcpy.CalculateField_management(cell05cent2, "coastLAT", "!NEAR_Y!", "PYTHON_9.3", "") arcpy.CalculateField_management(cell05cent2, "coastLON", "!NEAR_X!", "PYTHON_9.3", "") arcpy.DeleteField_management(cell05cent2, ["NEAR_FID", "NEAR_DIST", "NEAR_X", "NEAR_Y"]) ########################################################################### # 5) use polygon neighbors to get dyadic structure ########################################################################### # Process: Polygon Neighbors print "obtaining the list of adjacent cell pairs using Polygon Neighbors" arcpy.PolygonNeighbors_analysis(cell05, cellneighbors, "cell05_id", "NO_AREA_OVERLAP", "NO_BOTH_SIDES", "", "DECIMAL_DEGREES", "SQUARE_MILES") ########################################################################### # 6) converting output to txt ########################################################################### fms = arcpy.FieldMappings() fms.addTable(cell05cent2dbf) print "converting table to txt: centroids" # Process: Table to Table arcpy.TableToTable_conversion(cell05cent2dbf, outdir, cell05cent2txt, "", fms, "") del fms fms = arcpy.FieldMappings()
def Reporte_neigbors(Capa_Entrada, area_no_permitida, tipo_validacion, ruta):
    """Build a neighbor-based validation report for small polygons.

    Runs the Polygon Neighbors tool on *Capa_Entrada* and classifies every
    polygon smaller than *area_no_permitida* by its own ``gridcode`` and the
    gridcodes of its neighbors, then formats a Spanish-language text report.

    Args:
        Capa_Entrada: path to a polygon feature class carrying a ``gridcode``
            field (0 = not suitable, 8 = exclusion/"permitido", any other
            value = a suitability class) and a ``Shape_Area`` field.
        area_no_permitida: area threshold; polygons with ``Shape_Area`` below
            ``int(area_no_permitida)`` are reported (tool runs in METERS /
            SQUARE_METERS).
        tipo_validacion: "TODOS", "PERMITIDOS" or "NO PERMITIDOS" — selects
            which report sections are emitted.
        ruta: folder where a timestamped scratch file GDB is created to hold
            the neighbor table.

    Returns:
        The report as a single formatted string.
    """
    # gridcode sentinels
    valor_permitido = 8   # exclusion polygons ("permitido")
    Valor_noapto = 0      # not-suitable polygons

    # Every other gridcode value present is treated as a suitability class.
    # Guard the removes: raising ValueError when 0 or 8 is absent from the
    # layer would abort the whole report.
    Valores_aptitud = list(set(
        fila[0] for fila in arcpy.da.SearchCursor(Capa_Entrada, ["gridcode"])))
    if Valor_noapto in Valores_aptitud:
        Valores_aptitud.remove(Valor_noapto)
    if valor_permitido in Valores_aptitud:
        Valores_aptitud.remove(valor_permitido)

    # Fields handed to Polygon Neighbors: OID, gridcode and area.
    campo_oid = str([campo.name
                     for campo in arcpy.Describe(Capa_Entrada).fields
                     if campo.type == 'OID'][0])
    Campos_neighbors = "%s;gridcode;Shape_Area" % (campo_oid)

    # Scratch GDB named with a timestamp so repeated runs never collide.
    nombre_gdb = "pol_vecinos_%s" % (
        datetime.datetime.now().strftime("%b_%d_%Y_%H_%M_%S"))
    nombre_gdb = nombre_gdb.replace(".", "")
    gdb = arcpy.CreateFileGDB_management(ruta, nombre_gdb)
    Capa_neighbors = str(gdb) + "\\" + os.path.basename(
        Capa_Entrada) + "_Neighbor"
    Tabla_neighbors = arcpy.PolygonNeighbors_analysis(
        Capa_Entrada, Capa_neighbors, Campos_neighbors, "NO_AREA_OVERLAP",
        "BOTH_SIDES", "", "METERS", "SQUARE_METERS")

    # The output table prefixes every requested field with src_/nbr_.
    campos_busqueda1 = Campos_neighbors.split(";")
    campos = [
        "src_" + campos_busqueda1[0], "nbr_" + campos_busqueda1[0],
        "src_" + campos_busqueda1[1], "nbr_" + campos_busqueda1[1],
        "src_" + campos_busqueda1[2], "nbr_" + campos_busqueda1[2]
    ]

    id_menores25 = []     # every neighbor row whose src polygon is small
    id_conVecinos8 = []   # small NO-APTO src rows with a "permitido" neighbor
    id_NoApto = []        # small NO-APTO src rows with any other neighbor
    id_AconVecinos8 = []  # small APTO src rows with a "permitido" neighbor
    id_aptitudes = []     # small APTO src rows with any other neighbor
    id_exclusiones = []   # small "permitido" src rows

    with arcpy.da.SearchCursor(Tabla_neighbors, campos) as cursor:
        for fila in cursor:
            # fila: (src_oid, nbr_oid, src_gridcode, nbr_gridcode,
            #        src_area, nbr_area)
            if fila[4] < int(area_no_permitida):
                id_menores25.append(fila[0])
                if fila[2] == Valor_noapto:
                    if fila[3] == valor_permitido:
                        id_conVecinos8.append(fila[0])
                    else:
                        id_NoApto.append(fila[0])
                if fila[2] in Valores_aptitud:
                    if fila[3] == valor_permitido:
                        id_AconVecinos8.append(fila[0])
                    else:
                        id_aptitudes.append(fila[0])
                elif fila[2] == valor_permitido:
                    id_exclusiones.append(fila[0])

    # A small polygon counts as "permitido" (allowed) only when *every* one of
    # its neighbor rows had a gridcode-8 neighbor, i.e. its count in the
    # with-permitido list equals its total small-row count.
    id_NoAptoPermitido = [
        OID_SRC for OID_SRC in id_conVecinos8
        if id_conVecinos8.count(OID_SRC) == id_menores25.count(OID_SRC)
    ]
    id_AptoPermitdo = [
        OID_SRC for OID_SRC in id_AconVecinos8
        if id_AconVecinos8.count(OID_SRC) == id_menores25.count(OID_SRC)
    ]

    # De-duplicate and count.
    id_NoAptoPermitido_U = list(set(id_NoAptoPermitido))
    id_menores25_U = list(set(id_menores25))
    id_exclusiones_u = list(set(id_exclusiones))
    id_aptitudes_u = list(set(id_aptitudes))
    id_NoApto_U = list(set(id_NoApto))
    id_AptoPermitdo_u = list(set(id_AptoPermitdo))
    N_menores25 = len(id_menores25_U)
    N_aptitudes = len(id_aptitudes_u)
    N_AptoPermitdo = len(id_AptoPermitdo_u)
    N_exclusiones = len(id_exclusiones_u)
    N_NoAptoPermitidos = len(id_NoAptoPermitido_U)
    N_NoApto = len(id_NoApto_U)
    campoOID = campos_busqueda1[0]

    # Build each report section once; the validation-type branches below only
    # decide which sections (and which header text) are emitted.
    # NOTE: str(tuple(...)) renders one-element lists as "(1,)" — kept as-is.
    seccion_permitidos = ""
    if N_NoAptoPermitidos > 0:
        seccion_permitidos += "Hay %s polígonos con menos de 25 Ha, que corresponden a áreas NO APTAS rodeadas de EXCLUSIONES. \n %s in %s \n \n" % (
            N_NoAptoPermitidos, campoOID, str(tuple(id_NoAptoPermitido_U)))
    if N_AptoPermitdo > 0:
        seccion_permitidos += "Hay %s polígonos con menos de 25 Ha, que corresponden a áreas APTAS,rodeadas de EXCLUSIONES.\n %s in %s \n \n" % (
            N_AptoPermitdo, campoOID, str(tuple(id_AptoPermitdo_u)))
    if N_exclusiones > 0:
        seccion_permitidos += "Hay %s polígonos con menos de 25 Ha, que corresponden a áreas de EXCLUSIONES.\n %s in %s \n \n" % (
            N_exclusiones, campoOID, str(tuple(id_exclusiones_u)))

    seccion_no_permitidos = ""
    if N_aptitudes > 0:
        seccion_no_permitidos += "Hay %s polígonos con menos de 25 Ha, que corresponden a áreas APTAS, rodeadas de polígonos diferentes a exclusiones.\n %s in %s \n \n" % (
            N_aptitudes, campoOID, str(tuple(id_aptitudes_u)))
    if N_NoApto > 0:
        seccion_no_permitidos += "Hay %s polígonos con menos de 25 Ha, que corresponden a áreas NO APTAS, rodeadas de polígonos diferentes a exclusiones.\n %s in %s \n \n" % (
            N_NoApto, campoOID, str(tuple(id_NoApto_U)))

    hay_permitidos = (N_NoAptoPermitidos > 0 or N_exclusiones > 0
                      or N_AptoPermitdo > 0)
    hay_no_permitidos = N_aptitudes > 0 or N_NoApto > 0

    pol_Vecinos = "Hay %s polígonos con menos de 25 Ha \n \n" % (N_menores25)
    if tipo_validacion == "TODOS":
        if hay_permitidos:
            pol_Vecinos += "------ Polígonos con menos de 25 Ha PERMITIDOS: \n \n -----"
        pol_Vecinos += seccion_permitidos
        if hay_no_permitidos:
            pol_Vecinos += "#### Polígonos con menos de 25 Ha NO PERMITIDOS \n \n ####"
        pol_Vecinos += seccion_no_permitidos
    elif tipo_validacion == "PERMITIDOS":
        if hay_permitidos:
            pol_Vecinos += "#### Polígonos con menos de 25 Ha PERMITIDOS: \n \n ####"
        pol_Vecinos += seccion_permitidos
    elif tipo_validacion == "NO PERMITIDOS":
        if hay_no_permitidos:
            pol_Vecinos += "#### Polígonos con menos de 25 Ha NO PERMITIDOS \n \n ####"
        pol_Vecinos += seccion_no_permitidos
    return pol_Vecinos