def process_ws(ws_fc, zone_name):
    # generate new zone ids
    DM.AddField(ws_fc, 'zoneid', 'TEXT', field_length=10)
    DM.CalculateField(ws_fc, 'zoneid', '!lagoslakeid!', 'PYTHON')
    ws_fc_lyr = DM.MakeFeatureLayer(ws_fc)

    # multipart flag
    DM.AddField(ws_fc, 'ismultipart', 'TEXT', field_length=2)
    with arcpy.da.UpdateCursor(ws_fc, ['ismultipart', 'SHAPE@']) as u_cursor:
        for row in u_cursor:
            if row[1].isMultipart:
                row[0] = 'Y'
            else:
                row[0] = 'N'
            u_cursor.updateRow(row)

    print("Edge flags...")
    # add flag fields
    DM.AddField(ws_fc, 'onlandborder', 'TEXT', field_length=2)
    DM.AddField(ws_fc, 'oncoast', 'TEXT', field_length=2)

    # identify border zones
    border_lyr = DM.MakeFeatureLayer(LAND_BORDER, 'border_lyr')
    DM.SelectLayerByLocation(ws_fc_lyr, 'INTERSECT', border_lyr)
    DM.CalculateField(ws_fc_lyr, 'onlandborder', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(ws_fc_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(ws_fc_lyr, 'onlandborder', "'N'", 'PYTHON')

    # identify coastal zones
    coastal_lyr = DM.MakeFeatureLayer(COASTLINE, 'coastal_lyr')
    DM.SelectLayerByLocation(ws_fc_lyr, 'INTERSECT', coastal_lyr)
    DM.CalculateField(ws_fc_lyr, 'oncoast', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(ws_fc_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(ws_fc_lyr, 'oncoast', "'N'", 'PYTHON')

    print("State assignment...")
    # states
    find_states(ws_fc, STATES_GEO)

    # glaciation status
    calc_glaciation(ws_fc, 'zoneid')

    # preface the field names with the zone name
    DM.DeleteField(ws_fc, 'ORIG_FID')
    fields = [f.name for f in arcpy.ListFields(ws_fc, '*')
              if f.type not in ('OID', 'Geometry') and not f.name.startswith('Shape_')]
    for f in fields:
        new_fname = '{zn}_{orig}'.format(zn=zone_name, orig=f).lower()
        try:
            DM.AlterField(ws_fc, f, new_fname, clear_field_alias='TRUE')
        # skip required fields rather than debugging the required-field error message;
        # we don't want to rename required fields anyway
        except arcpy.ExecuteError:
            pass

    # cleanup
    lyr_objects = [lyr_object for var_name, lyr_object in locals().items()
                   if var_name.endswith('lyr')]
    for l in lyr_objects:
        DM.Delete(l)
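
# Usage sketch (hypothetical, not part of the original script): process_ws relies on
# module-level names defined elsewhere in this file -- DM (arcpy.management), the
# LAND_BORDER, COASTLINE, and STATES_GEO constants, and the find_states() and
# calc_glaciation() helpers -- plus a ws_fc that carries a lagoslakeid field.
# A call might look like:
##  import arcpy
##  from arcpy import management as DM
##  arcpy.env.workspace = 'in_memory'
##  process_ws(r'D:\scratch.gdb\ws_watersheds', 'ws')  # prefixes output fields with 'ws_'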
def outputPMP(type, area, outPath):
    desc = arcpy.Describe(basin)
    basinName = desc.baseName
    pmpPoints = env.scratchGDB + "\\PMP_Points"  # 'PMP_Points' feature class provides the data for the output

    outArea = str(int(round(area, 0))) + "sqmi"

    arcpy.AddMessage("\nCopying PMP_Points feature class to " + outPath + "...")
    dm.Merge(pmpPoints, outPath)  # merge the scratch feature layer(s) of vector grid cells into the output

    arcpy.AddMessage("\nCreating Basin Summary Table...")
    tableName = type + "_PMP_Basin_Average_" + outArea
    tablePath = env.scratchGDB + "\\" + tableName
    dm.CreateTable(env.scratchGDB, tableName)  # create a blank table

    cursor = arcpy.da.InsertCursor(tablePath, "*")  # add a single blank row to hold the summary values
    cursor.insertRow([0])
    del cursor

    dm.AddField(tablePath, "STORM_TYPE", "TEXT", "", "", 10, "Storm Type")  # create "Storm Type" field
    dm.CalculateField(tablePath, "STORM_TYPE", "'" + type + "'", "PYTHON_9.3")  # populate storm type field

    # add a field for each PMP duration and calculate the basin average
    for field in arcpy.ListFields(pmpPoints, "PMP_*"):
        fieldName = field.name
        fieldAve = basinAve(basin, fieldName)  # basinAve() returns the average (weighted or not)
        dm.AddField(tablePath, fieldName, "DOUBLE", "", 2)  # add duration field
        dm.CalculateField(tablePath, fieldName, fieldAve, "PYTHON_9.3")  # assign the basin average

    arcpy.AddMessage("\nSummary table complete.")
    basAveTables.append(tablePath)
    return
def educ_to_xy(layer, out_dir, scaling_factor):
    levels = ["less_than_hs", "high_school", "some_college", "bachelors", "graduate"]
    for level in levels:
        trunc_level = level[0:10]  # source field names are truncated to 10 characters

        # add and calculate the new field
        field_name = level + str(scaling_factor)
        mg.AddField(layer, field_name, "SHORT")
        expr = "!{}! / {}".format(trunc_level, scaling_factor)
        mg.CalculateField(layer, field_name, expr)

        # generate the dots
        out_dots = level + str(scaling_factor)
        mg.CreateRandomPoints(out_path=out_dir, out_name=out_dots,
                              constraining_feature_class=layer,
                              number_of_points_or_field=field_name)
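
# Worked example (illustrative numbers only): educ_to_xy prepares a dot-density map,
# where each output point stands for scaling_factor people. For a polygon with 25000
# bachelors-degree holders and scaling_factor=100, the calculated field holds
# 25000 / 100 = 250, so CreateRandomPoints scatters 250 points inside that polygon.
##  educ_to_xy('tracts_lyr', r'D:\output.gdb', 100)  # hypothetical layer and path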
def georeference_lakes(lake_points_fc, out_fc, lake_id_field, lake_name_field, lake_county_field='', state='',
                       master_gdb=r'C:\Users\smithn78\Dropbox\CL_HUB_GEO\Lake_Georeferencing\Masters_for_georef.gdb'):
    """
    Evaluate water quality sampling point locations and either assign the point to a lake polygon or flag the
    point for manual review.
    :param lake_points_fc: Feature class containing the sampling point locations
    :param out_fc: Output feature class with the link assignments and review flags
    :param lake_id_field: Field in lake_points_fc containing the source program's lake id
    :param lake_name_field: Field in lake_points_fc containing the source program's lake name
    :param lake_county_field: (Optional) Field in lake_points_fc containing the county name
    :param state: 2-letter state code abbreviation for the data source
    :param master_gdb: Location of master geodatabase used for linking
    :return:
    """
    master_lakes_fc = os.path.join(master_gdb, MASTER_LAKES_FC)
    master_lakes_lines = os.path.join(master_gdb, MASTER_LAKES_LINES)
    master_streams_fc = os.path.join(master_gdb, MASTER_STREAMS_FC)
    master_xwalk = os.path.join(master_gdb, MASTER_XWALK)

    # setup
    arcpy.AddMessage("Joining...")
    state = state.upper()
    if state not in STATES:
        raise ValueError('Use the 2-letter state code abbreviation')
    arcpy.env.workspace = 'in_memory'
    out_short = os.path.splitext(os.path.basename(out_fc))[0]
    join1 = '{}_1'.format(out_short)
    join2 = '{}_2'.format(out_short)
    join3 = '{}_3'.format(out_short)
    join3_select = join3 + '_select'
    join4 = '{}_4'.format(out_short)
    join5 = '{}_5'.format(out_short)
    joinx = '{}_x'.format(out_short)

    if lake_county_field and not arcpy.ListFields(lake_points_fc, '{}*'.format(lake_county_field)):
        raise Exception('{} field does not exist in dataset.'.format(lake_county_field))

    point_fields = [f.name for f in arcpy.ListFields(lake_points_fc)]

    # convert the lake id to a text field if it is not one already
    lake_id_field_type = arcpy.ListFields(lake_points_fc, lake_id_field)[0].type
    if lake_id_field_type != 'String':
        temp_id_field = '{}_t'.format(lake_id_field)
        arcpy.AddField_management(lake_points_fc, temp_id_field, 'TEXT', field_length=255)
        expr = '!{}!'.format(lake_id_field)
        arcpy.CalculateField_management(lake_points_fc, temp_id_field, expr, 'PYTHON')
        arcpy.DeleteField_management(lake_points_fc, lake_id_field)
        arcpy.AlterField_management(lake_points_fc, temp_id_field, new_field_name=lake_id_field)

    # Make some spatial connections and fulfill some logic to assign a link
    join1 = AN.SpatialJoin(lake_points_fc, master_lakes_fc, join1, 'JOIN_ONE_TO_MANY', 'KEEP_ALL',
                           match_option='INTERSECT')
    join2 = AN.SpatialJoin(join1, master_streams_fc, join2, 'JOIN_ONE_TO_MANY', 'KEEP_ALL',
                           match_option='INTERSECT')
    join3 = AN.SpatialJoin(join2, master_lakes_fc, join3, 'JOIN_ONE_TO_MANY', 'KEEP_ALL',
                           match_option='INTERSECT', search_radius='10 meters')
    join4 = AN.SpatialJoin(join3, master_lakes_fc, join4, 'JOIN_ONE_TO_MANY', 'KEEP_ALL',
                           match_option='INTERSECT', search_radius='100 meters')

    # set up for editing the lake assignment values
    DM.AddField(join4, 'Auto_Comment', 'TEXT', field_length=100)
    DM.AddField(join4, 'Manual_Review', 'SHORT')
    DM.AddField(join4, 'Shared_Words', 'TEXT', field_length=100)
    DM.AddField(join4, 'Linked_lagoslakeid', 'LONG')
    DM.AddField(join4, 'GEO_Discovered_Name', 'TEXT', field_length=255)
    DM.AddField(join4, 'Duplicate_Candidate', 'TEXT', field_length=1)
    DM.AddField(join4, 'Is_Legacy_Link', 'TEXT', field_length=1)

    update_fields = [lake_id_field, lake_name_field,
                     MASTER_LAKE_ID, MASTER_GNIS_NAME,                     # 0m match
                     'PERMANENT_IDENTIFIER_1', 'GNIS_NAME_1',              # stream match
                     MASTER_LAKE_ID + '_1', MASTER_GNIS_NAME + '_12',      # 10m match
                     MASTER_LAKE_ID + '_12', MASTER_GNIS_NAME + '_12_13',  # 100m match
                     'Auto_Comment', 'Manual_Review', 'Shared_Words', 'Linked_lagoslakeid']

    # use a cursor to go through each point and evaluate its assignment
    cursor = arcpy.da.UpdateCursor(join4, update_fields)
    arcpy.AddMessage("Calculating link status...")
    for row in cursor:
        id, name, mid_0, mname_0, stream_id, streamname_0, mid_10, mname_10, mid_100, mname_100, \
            comment, review, words, lagosid = row
        if mid_0 is not None:  # the point is directly in a polygon
            if name and mname_0:
                words = lagosGIS.list_shared_words(name, mname_0, exclude_lake_words=False)
            comment = 'Exact location link'
            lagosid = mid_0
            review = -1
        elif mid_0 is None and mid_10 is not None:  # the point is only within 10m of a lake
            if name and mname_10:
                words = lagosGIS.list_shared_words(name, mname_10, exclude_lake_words=False)
            if words:
                comment = 'Linked by common name and location'
                lagosid = mid_10
                review = -1
            else:
                comment = 'Linked by common location'
                lagosid = mid_10
                review = 1
        elif mid_0 is None and mid_10 is None:
            if stream_id is not None:  # there is a stream match
                comment = 'Not linked because represented as river in NHD'
                review = 2
            else:
                if mid_100 is not None:  # the point is only within 100m of lake(s)
                    if name and mname_100:
                        words = lagosGIS.list_shared_words(name, mname_100, exclude_lake_words=True)
                        # TODO: Frequency check
                    if words:
                        comment = 'Linked by common name and location'
                        lagosid = mid_100
                        review = 1
                    else:
                        comment = 'Linked by common location'
                        lagosid = mid_100
                        review = 2
        cursor.updateRow((id, name, mid_0, mname_0, stream_id, streamname_0, mid_10, mname_10, mid_100,
                          mname_100, comment, review, words, lagosid))

    # County-matching logic removed here: it joined otherwise-unlinked points to master lakes within the
    # same state on the county name, compared lake names for shared words, and marked matches as
    # 'PRELIMINARY: Linked by common name and location' with Manual_Review = 2. It never worked reliably
    # and hasn't been important yet; restore it from version control if it's ever needed.

    if state in LAGOSNE_STATES:
        DM.JoinField(join4, lake_id_field, master_xwalk, 'lagosne_legacyid',
                     ['lagoslakeid', 'lagos_lakename', 'lagos_state'])
        update_fields = [lake_id_field, lake_name_field,
                         MASTER_LAKE_ID + '_12_13', 'lagos_lakename', 'lagos_state',  # crosswalk match
                         'Auto_Comment', 'Manual_Review', 'Shared_Words', 'Linked_lagoslakeid',
                         'Is_Legacy_Link']

        with arcpy.da.UpdateCursor(join4, update_fields) as uCursor:
            for uRow in uCursor:
                id, name, mid_x, mname_x, state_x, comment, review, words, lagosid, legacy_flag = uRow
                # fields are already populated from the links above; revise only for legacy links
                if mid_x is not None:
                    if state == state_x:
                        legacy_flag = 'Y'  # set to Y if the state matches, whether or not the legacy comment is used
                    if comment != 'Exact location link':
                        review = 1
                        if state != state_x:
                            review = 3  # downgrade if states mismatch--border lakes OK, random common IDs NOT. Check.
                            legacy_flag = 'Y'
                        comment = 'LAGOS-NE legacy link'  # only comment non-exact location matches
                        lagosid = mid_x
                        if name and mname_x:
                            words = lagosGIS.list_shared_words(name, mname_x)  # update words only for legacy comment
                new_row = id, name, mid_x, mname_x, state_x, comment, review, words, lagosid, legacy_flag
                uCursor.updateRow(new_row)

    # Undo the next line if you ever bring the county chunk back.
    join5 = join4

    # Then re-code the no-matches as a 3 and copy comments to the editable field.
    # Compressing the joined lake ids into one field while keeping both comment fields
    # lets us keep track of how many of the auto matches are bad.
    if arcpy.ListFields(join5, 'Comment'):
        comment_field_name = 'Comment_LAGOS'
    else:
        comment_field_name = 'Comment'
    DM.AddField(join5, comment_field_name, 'TEXT', field_length=100)
    with arcpy.da.UpdateCursor(join5, ['Manual_Review', 'Auto_Comment', comment_field_name]) as cursor:
        for flag, ac, comment in cursor:
            if flag is None:
                flag = 3
                ac = 'Not linked'
            comment = ac
            cursor.updateRow((flag, ac, comment))

    # Re-code points more than 100m inside the polygon of the lake: no need to check those
    DM.MakeFeatureLayer(join5, 'join5_lyr')
    DM.MakeFeatureLayer(master_lakes_lines, 'lake_lines_lyr')
    DM.SelectLayerByAttribute('join5_lyr', 'NEW_SELECTION', "Auto_Comment = 'Exact location link'")
    DM.SelectLayerByLocation('join5_lyr', 'INTERSECT', 'lake_lines_lyr', '100 meters',
                             'SUBSET_SELECTION', 'INVERT')
    DM.CalculateField('join5_lyr', 'Manual_Review', '-2', 'PYTHON')
    DM.Delete('join5_lyr')
    DM.Delete('lake_lines_lyr')

    # keep only the necessary fields when writing the output
    copy_fields = point_fields + ['Linked_lagoslakeid', 'Auto_Comment', 'Manual_Review', 'Is_Legacy_Link',
                                  'Shared_Words', 'Comment', 'Duplicate_Candidate', 'GEO_Discovered_Name']
    copy_fields.remove('Shape')
    copy_fields.remove('OBJECTID')
    lagosGIS.select_fields(join5, out_fc, copy_fields)

    DM.AssignDomainToField(out_fc, 'Comment', 'Comment')
    DM.AddField(out_fc, 'Total_points_in_lake_poly', 'SHORT')

    # Remove any duplicates. (These originate in the join3/join4 transition because a point can be within
    # both 10m and 100m of lakes; this takes the closest lake as true. A hack solution, admittedly.)
    out_fc_fields = [f.name for f in arcpy.ListFields(out_fc) if f.name != 'OBJECTID']
    DM.DeleteIdentical(out_fc, out_fc_fields)

    # Get the join count for each limno lake id.
    # De-dupe anything resulting from limno id duplicates first, before counting.
    id_pairs = list(set(arcpy.da.SearchCursor(out_fc, [lake_id_field, 'Linked_lagoslakeid'])))
    # THEN pull out the LAGOS id. Any duplicates now are due only to multiple distinct points within a lake.
    lagos_ids = [ids[1] for ids in id_pairs]
    sample_ids = [ids[0] for ids in id_pairs]
    lagos_lake_counts = Counter(lagos_ids)
    linked_multiple_lake_counts = Counter(sample_ids)

    # get the count of points in each lake polygon
    with arcpy.da.UpdateCursor(out_fc, ['Linked_lagoslakeid', 'Total_points_in_lake_poly']) as cursor:
        for lagos_id, join_count in cursor:
            join_count = lagos_lake_counts[lagos_id]
            cursor.updateRow((lagos_id, join_count))

    # mark any samples linked to more than one lake so that the analyst can select the correct lake
    # during the manual review process
    with arcpy.da.UpdateCursor(out_fc, [lake_id_field, 'Duplicate_Candidate']) as cursor:
        for sample_id, duplicate_flag in cursor:
            duplicate_count = linked_multiple_lake_counts[sample_id]
            if duplicate_count > 1:
                duplicate_flag = "Y"
            else:
                duplicate_flag = "N"
            cursor.updateRow((sample_id, duplicate_flag))

    # clean up
    DM.AddField(out_fc, 'Note', 'TEXT', field_length=140)
    DM.Delete('in_memory')
    arcpy.AddMessage('Completed.')
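
# Illustration of the Counter bookkeeping above (standalone doctest, made-up data):
# >>> from collections import Counter
# >>> id_pairs = [('samp1', 501), ('samp1', 502), ('samp2', 501)]
# >>> Counter(pair[1] for pair in id_pairs)[501]       # points linked to lake 501
# 2
# >>> Counter(pair[0] for pair in id_pairs)['samp1']   # lakes linked to samp1 > 1, so flag 'Y'
# 2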
def process_zone(zone_fc, output, zone_name, zone_id_field, zone_name_field, other_keep_fields, clip_hu8,
                 lagosne_name):
    # dissolve on the field that zoneid is based on (the field that identifies a unique zone)
    dissolve_fields = [f for f in '{}, {}, {}'.format(zone_id_field, zone_name_field,
                                                      other_keep_fields).split(', ') if f != '']
    print("Dissolving...")
    dissolve1 = DM.Dissolve(zone_fc, 'dissolve1', dissolve_fields)

    # update the name field to match our standard
    DM.AlterField(dissolve1, zone_name_field, 'name')

    # original area
    DM.AddField(dissolve1, 'originalarea', 'DOUBLE')
    DM.CalculateField(dissolve1, 'originalarea', '!shape.area@hectares!', 'PYTHON')

    # clip
    print("Clipping...")
    clip = AN.Clip(dissolve1, MASTER_CLIPPING_POLY, 'clip')
    if clip_hu8 == 'Y':
        final_clip = AN.Clip(clip, HU8_OUTPUT, 'final_clip')
    else:
        final_clip = clip

    print("Selecting...")
    # calc new area, original area pct, compactness
    DM.AddField(final_clip, 'area_ha', 'DOUBLE')
    DM.AddField(final_clip, 'originalarea_pct', 'DOUBLE')
    DM.AddField(final_clip, 'compactness', 'DOUBLE')
    DM.JoinField(final_clip, zone_id_field, dissolve1, zone_id_field, 'originalarea_pct')

    uCursor_fields = ['area_ha', 'originalarea_pct', 'originalarea', 'compactness',
                      'SHAPE@AREA', 'SHAPE@LENGTH']
    with arcpy.da.UpdateCursor(final_clip, uCursor_fields) as uCursor:
        for row in uCursor:
            area, orig_area_pct, orig_area, comp, shape_area, shape_length = row
            area = shape_area / 10000  # convert from m2 to hectares
            orig_area_pct = round(100 * area / orig_area, 2)
            comp = 4 * 3.14159 * shape_area / (shape_length ** 2)  # Polsby-Popper compactness, range 0-1
            row = (area, orig_area_pct, orig_area, comp, shape_area, shape_length)
            uCursor.updateRow(row)

    # Filter out zones with <5% of their original area AND a compactness measure of <.2 (range 0-1)
    # AND no bigger than 500 sq. km (the size cap saves Chippewa County and a WWF). Save the eliminated
    # polygons to the temp database as a separate layer for inspection. HU4 and HU8 get different
    # processing so that they match the extent of HU8 more closely but still throw out tiny slivers.
    # County zones are only eliminated if a tiny, tiny, tiny sliver (so: none should be eliminated).
    if zone_name not in ('hu4', 'hu12', 'county'):
        selected = AN.Select(final_clip, 'selected',
                             "originalarea_pct >= 5 OR compactness >= .2 OR area_ha > 50000")
        not_selected = AN.Select(final_clip, '{}_not_selected'.format(output),
                                 "originalarea_pct < 5 AND compactness < .2 AND area_ha < 50000")
    else:
        selected = final_clip

    # Eliminate small slivers, re-calc area fields, add perimeter and multipart flag.
    # Leaves the occasional errant sliver, but some areas over 25 hectares are more valid, so this is
    # CONSERVATIVE.
    print("Trimming...")
    trimmed = DM.EliminatePolygonPart(selected, 'trimmed', 'AREA', '25 Hectares', part_option='ANY')

    # gather up a few calculations into one cursor because this was taking too long over the HU12 layer
    DM.AddField(trimmed, 'perimeter_m', 'DOUBLE')
    DM.AddField(trimmed, 'multipart', 'TEXT', field_length=1)
    uCursor_fields = ['area_ha', 'originalarea_pct', 'originalarea', 'perimeter_m', 'multipart', 'SHAPE@']
    with arcpy.da.UpdateCursor(trimmed, uCursor_fields) as uCursor:
        for row in uCursor:
            area, orig_area_pct, orig_area, perim, multipart_flag, shape = row
            area = shape.area / 10000  # convert from m2 to hectares
            orig_area_pct = round(100 * area / orig_area, 2)
            perim = shape.length

            # multipart flag calc
            if shape.isMultipart:
                multipart_flag = 'Y'
            else:
                multipart_flag = 'N'
            row = (area, orig_area_pct, orig_area, perim, multipart_flag, shape)
            uCursor.updateRow(row)

    # delete the intermediate fields
    DM.DeleteField(trimmed, 'compactness')
    DM.DeleteField(trimmed, 'originalarea')

    print("Zone IDs....")
    # link to LAGOS-NE zone IDs
    DM.AddField(trimmed, 'zoneid', 'TEXT', field_length=40)
    trimmed_lyr = DM.MakeFeatureLayer(trimmed, 'trimmed_lyr')
    if lagosne_name:
        # join to the old master GDB on the same key field and copy in the ids
        old_fc = os.path.join(LAGOSNE_GDB, lagosne_name)
        old_fc_lyr = DM.MakeFeatureLayer(old_fc, 'old_fc_lyr')
        if lagosne_name in ('STATE', 'COUNTY'):
            DM.AddJoin(trimmed_lyr, zone_id_field, old_fc_lyr, 'FIPS')
        else:
            # usually works because the zones come from the same source data
            DM.AddJoin(trimmed_lyr, zone_id_field, old_fc_lyr, zone_id_field)
        DM.CalculateField(trimmed_lyr, 'zoneid', '!{}.ZoneID!.lower()'.format(lagosne_name), 'PYTHON')
        DM.RemoveJoin(trimmed_lyr)

    # generate new zone ids for anything that didn't get a LAGOS-NE id
    old_ids = [row[0] for row in arcpy.da.SearchCursor(trimmed, 'zoneid')]
    with arcpy.da.UpdateCursor(trimmed, 'zoneid') as cursor:
        counter = 1
        for row in cursor:
            if not row[0]:  # if no existing ID borrowed from LAGOS-NE, assign a new one
                new_id = '{name}_{num}'.format(name=zone_name, num=counter)

                # ensures new ids don't re-use old numbers, but fills in all positive numbers eventually
                while new_id in old_ids:
                    counter += 1
                    new_id = '{name}_{num}'.format(name=zone_name, num=counter)
                row[0] = new_id
                cursor.updateRow(row)
                counter += 1

    print("Edge flags...")
    # add flag fields
    DM.AddField(trimmed, 'onlandborder', 'TEXT', field_length=2)
    DM.AddField(trimmed, 'oncoast', 'TEXT', field_length=2)

    # identify border zones
    border_lyr = DM.MakeFeatureLayer(LAND_BORDER, 'border_lyr')
    DM.SelectLayerByLocation(trimmed_lyr, 'INTERSECT', border_lyr)
    DM.CalculateField(trimmed_lyr, 'onlandborder', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(trimmed_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(trimmed_lyr, 'onlandborder', "'N'", 'PYTHON')

    # identify coastal zones
    coastal_lyr = DM.MakeFeatureLayer(COASTLINE, 'coastal_lyr')
    DM.SelectLayerByLocation(trimmed_lyr, 'INTERSECT', coastal_lyr)
    DM.CalculateField(trimmed_lyr, 'oncoast', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(trimmed_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(trimmed_lyr, 'oncoast', "'N'", 'PYTHON')

    print("State assignment...")
    # assign the state containing the zone's center; fall back to any intersecting state
    DM.AddField(trimmed, 'state', 'TEXT', field_length=2)
    state_center = arcpy.SpatialJoin_analysis(trimmed, STATE_FC, 'state_center',
                                              join_type='KEEP_COMMON',
                                              match_option='HAVE_THEIR_CENTER_IN')
    state_intersect = arcpy.SpatialJoin_analysis(trimmed, STATE_FC, 'state_intersect',
                                                 match_option='INTERSECT')
    state_center_dict = {row[0]: row[1]
                         for row in arcpy.da.SearchCursor(state_center, ['zoneid', 'STUSPS'])}
    state_intersect_dict = {row[0]: row[1]
                            for row in arcpy.da.SearchCursor(state_intersect, ['zoneid', 'STUSPS'])}
    with arcpy.da.UpdateCursor(trimmed, ['zoneid', 'state']) as cursor:
        for updateRow in cursor:
            keyValue = updateRow[0]
            if keyValue in state_center_dict:
                updateRow[1] = state_center_dict[keyValue]
            else:
                updateRow[1] = state_intersect_dict[keyValue]
            cursor.updateRow(updateRow)

    # glaciation status?
    # TODO as version 0.6

    # preface the field names with the zone name
    DM.DeleteField(trimmed, 'ORIG_FID')
    fields = [f.name for f in arcpy.ListFields(trimmed, '*')
              if f.type not in ('OID', 'Geometry') and not f.name.startswith('Shape_')]
    for f in fields:
        new_fname = '{zn}_{orig}'.format(zn=zone_name, orig=f).lower()
        try:
            DM.AlterField(trimmed, f, new_fname, clear_field_alias='TRUE')
        # skip required fields rather than debugging the required-field error message;
        # we don't want to rename required fields anyway
        except arcpy.ExecuteError:
            pass

    DM.CopyFeatures(trimmed, output)

    # cleanup
    lyr_objects = [lyr_object for var_name, lyr_object in locals().items()
                   if var_name.endswith('lyr')]
    temp_fcs = arcpy.ListFeatureClasses('*')
    for l in lyr_objects + temp_fcs:
        DM.Delete(l)
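
# Usage sketch (hypothetical arguments; field names are assumptions): dissolve HU12s on
# their identifying field, clip them to the HU8 extent, and borrow LAGOS-NE ids from the
# old HU12 layer before writing the standardized output.
##  process_zone(r'D:\WBD.gdb\WBDHU12', r'D:\out.gdb\hu12', 'hu12',
##               'HUC12', 'NAME', '', clip_hu8='Y', lagosne_name='HU12')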
# loop through all binary (0/1) grids, building up the hypergrid with the
# presence/absence info stored in a single text column ('spp0')
for i in range(start, len(codeL)):
    elem = codeL[i]
    rasName = elem + "_c.tif"
    if rasName in rasL:
        if i == 0:
            inRas = inPath + "/" + rasName
            curHyp = wrk + "/hyp" + str(i)
            print("working on " + rasName)
            man.CopyRaster(inRas, curHyp)
            man.BuildRasterAttributeTable(curHyp)
            man.AddField(curHyp, "spp0", "TEXT", "", "", 251)
            man.AddField(curHyp, "temp", "SHORT", 1)
            expr = "str(!Value!)"
            man.CalculateField(curHyp, "spp0", expr, "PYTHON")
        else:
            iminus = i - 1
            prevHyp = wrk + "/hyp" + str(iminus)
            print("working on " + elem + ", " + str(i) + " of " + str(listLen))
            curHyp = Combine([prevHyp, rasName])
            curHyp.save(wrk + "/hyp" + str(i))
            man.AddField(curHyp, "spp0", "TEXT", "", "", 251)
            jval = "hyp" + str(iminus)
            man.JoinField(curHyp, jval, prevHyp, "VALUE", ["spp0"])

            # field names from the combine are truncated to 11 chars and uppercased
            rasNoDot = rasName[0:rasName.find(".")]
            newCol = rasNoDot[0:11].upper()
            expr = "str(!spp0_1!) + str(!" + newCol + "!)"
            man.CalculateField(curHyp, "spp0", expr, "PYTHON")

            # clean up
            man.Delete(prevHyp)
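
# The resulting 'spp0' string holds one character per input grid, in codeL order, so a
# single text column encodes the whole species combination. Standalone decode sketch
# (hypothetical codes):
# >>> codes = ['sppA', 'sppB', 'sppC', 'sppD']
# >>> dict(zip(codes, '0110'))
# {'sppA': '0', 'sppB': '1', 'sppC': '1', 'sppD': '0'}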
def basinAve(aoiBasin, pmpField):
    pmpPoints = env.scratchGDB + "\\PMP_Points"  # path of 'PMP_Points' scratch feature class
    if weightedAve:
        arcpy.AddMessage("\tCalculating basin average for " + pmpField + " (weighted)...")
        vectorGridClip = env.scratchGDB + "\\VectorGridClip"  # path of clipped vector grid scratch feature class
        sumstats = env.scratchGDB + "\\SummaryStats"

        # make a feature layer of vector grid cells and select the ones that intersect the basin
        dm.MakeFeatureLayer(home + "\\Input\\Non_Storm_Data.gdb\\Vector_Grid", "vgLayer")
        dm.SelectLayerByLocation("vgLayer", "INTERSECT", aoiBasin)
        an.Clip("vgLayer", aoiBasin, vectorGridClip)  # clip the aoi vector grid to the basin

        # weight each grid cell by the portion of its area that falls inside the basin
        dm.AddField(pmpPoints, "WEIGHT", "DOUBLE")  # add 'WEIGHT' field to PMP_Points
        dm.MakeFeatureLayer(vectorGridClip, "vgClipLayer")  # layer of basin-clipped vector grid cells
        dm.MakeFeatureLayer(pmpPoints, "pmpPointsLayer")  # layer of the PMP_Points feature class
        dm.AddJoin("pmpPointsLayer", "ID", "vgClipLayer", "ID")  # join PMP_Points to the clipped grid
        dm.CalculateField("pmpPointsLayer", "WEIGHT", "!VectorGridClip.Shape_Area!", "PYTHON_9.3")
        dm.RemoveJoin("pmpPointsLayer", "VectorGridClip")

        # weighted average = sum over cells of (weight_i / sum(weights)) * value_i
        an.Statistics(pmpPoints, sumstats, [["WEIGHT", "SUM"]], "")
        with arcpy.da.SearchCursor(sumstats, "SUM_WEIGHT") as stats:
            for row in stats:
                weightSum = row[0]

        pmpWgtAve = pmpField + "_WgtAve"
        express = "(!WEIGHT!/{}) * !{}!".format(weightSum, pmpField)
        dm.AddField(pmpPoints, pmpWgtAve, "DOUBLE", 2)
        dm.CalculateField(pmpPoints, pmpWgtAve, express, "PYTHON_9.3")

        an.Statistics(pmpPoints, sumstats, [[pmpWgtAve, "SUM"]], "")
        sumwgtave = "SUM_" + pmpWgtAve
        with arcpy.da.SearchCursor(sumstats, sumwgtave) as stats:
            for row in stats:
                wgtAve = row[0]
        return round(wgtAve, 2)

##        na = arcpy.da.TableToNumPyArray(pmpPoints, (pmpField, 'WEIGHT'))  # values and weights to Numpy array
##        wgtAve = numpy.average(na[pmpField], weights=na['WEIGHT'])        # weighted average with Numpy
##        del na
##        return round(wgtAve, 2)

    else:
        arcpy.AddMessage("\tCalculating basin average for " + pmpField + " (not weighted)...")
        sumstats = env.scratchGDB + "\\SummaryStats"
        an.Statistics(pmpPoints, sumstats, [[pmpField, "MEAN"]], "")
        mean = "MEAN_" + pmpField
        with arcpy.da.SearchCursor(sumstats, mean) as stats:
            for row in stats:
                fieldAve = row[0]
        return round(fieldAve, 2)
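
# The field-calculator approach above is equivalent to the commented-out numpy
# alternative: sum((w_i / sum(w)) * x_i) is just a weighted average. Standalone check
# with made-up numbers:
# >>> import numpy
# >>> numpy.average(numpy.array([10.0, 20.0]), weights=numpy.array([3.0, 1.0]))
# 12.5
# i.e. (3*10 + 1*20) / (3 + 1) = 12.5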
# don't assume i is the class level -- extract the class here
classLevel = hypZ[i][-1:]
curZo = wrk + "/zon_C" + classLevel

# cycle through each EDM
for j in range(len(rasL)):
    inRas = inPath + "/" + rasL[j] + "_c.tif"
    curZoT_out = wrk + "/zonTab_C" + str(i) + "_" + str(j)
    print(".. zoning " + rasL[j])
    curZoT = ZonalStatisticsAsTable(hypZ[i], "Value", inRas, curZoT_out, "DATA", "MAXIMUM")
    if j == 0:
        # first grid: initialize the zone raster and its 'spp0' text column
        man.CopyRaster(hypZ[i], curZo)
        man.AddField(curZo, "spp0", "TEXT", "", "", 251)
        man.JoinField(curZo, "Value", curZoT, "VALUE", ["MAX"])
        expr = "str(!MAX!)"
    else:
        # subsequent grids: append each zonal maximum to the 'spp0' string
        man.JoinField(curZo, "Value", curZoT, "VALUE", ["MAX"])
        expr = "str(!spp0!) + str(!MAX!)"
    man.CalculateField(curZo, "spp0", expr, "PYTHON")
    man.DeleteField(curZo, "MAX")
    man.Delete(curZoT_out)

# expand information out to one col for each spp.
def classify_lakes(nhd, out_feature_class, exclude_intermit_flowlines=False, debug_mode=False):
    if debug_mode:
        arcpy.env.overwriteOutput = True
        temp_gdb = cu.create_temp_GDB('classify_lake_connectivity')
        arcpy.env.workspace = temp_gdb
        arcpy.AddMessage('Debugging workspace located at {}'.format(temp_gdb))
    else:
        arcpy.env.workspace = 'in_memory'

    if arcpy.Exists("temp_fc"):
        raise Exception("A leftover temp_fc exists in the workspace; aborting rather than clobbering it.")

    # Tool temporary feature classes
    temp_fc = "temp_fc"
    csiwaterbody_10ha = "csiwaterbody_10ha"
    nhdflowline_filtered = "nhdflowline_filtered"
    dangles = "dangles"
    start = "start"
    end = "end"
    startdangles = "startdangles"
    enddangles = "enddangles"
    non_artificial_end = "non_artificial_end"
    flags_10ha_lake_junctions = "flags_10ha_lake_junctions"
    midvertices = "midvertices"
    non10vertices = "non10vertices"
    non10junctions = "non10junctions"
    all_non_flag_points = "all_non_flag_points"
    barriers = "barriers"
    trace1_junctions = "trace1_junctions"
    trace1_flowline = "trace1_flowline"
    trace2_junctions = "trace2junctions"
    trace2_flowline = "trace2_flowline"

    # Clean up the workspace in case of a bad exit from a prior run in the same session.
    this_tool_layers = ["dangles_lyr", "nhdflowline_lyr", "junction_lyr", "midvertices_lyr",
                        "all_non_flag_points_lyr", "non10vertices_lyr", "out_fc_lyr", "trace1", "trace2"]
    this_tool_temp = [temp_fc, csiwaterbody_10ha, nhdflowline_filtered, dangles, start, end, startdangles,
                      enddangles, non_artificial_end, flags_10ha_lake_junctions, midvertices, non10vertices,
                      non10junctions, all_non_flag_points, barriers, trace1_junctions, trace1_flowline,
                      trace2_junctions, trace2_flowline]
    for item in this_tool_layers + this_tool_temp:
        try:
            DM.Delete(item)
        except arcpy.ExecuteError:
            pass

    # Local variables:
    nhdflowline = os.path.join(nhd, "Hydrography", "NHDFLowline")
    nhdjunction = os.path.join(nhd, "Hydrography", "HYDRO_NET_Junctions")
    nhdwaterbody = os.path.join(nhd, "Hydrography", "NHDWaterbody")
    network = os.path.join(nhd, "Hydrography", "HYDRO_NET")

    # Get lakes, ponds and reservoirs over a hectare.
    # csi_population_filter = '''"AreaSqKm" >=0.01 AND\
    # "FCode" IN (39000,39004,39009,39010,39011,39012,43600,43613,43615,43617,43618,43619,43621)'''
    all_lakes_reservoirs_filter = '''"FType" IN (390, 436)'''

    # Can't see why we shouldn't just attribute all lakes and reservoirs
    # arcpy.Select_analysis(nhdwaterbody, "csiwaterbody", lake_population_filter)
    arcpy.AddMessage("Initializing output.")
    if exclude_intermit_flowlines:
        DM.CopyFeatures(out_feature_class, temp_fc)
        DM.Delete(out_feature_class)
    else:
        arcpy.Select_analysis(nhdwaterbody, temp_fc, all_lakes_reservoirs_filter)

    # Get lakes, ponds and reservoirs over 10 hectares.
    lakes_10ha_filter = '''"AreaSqKm" >= 0.1 AND "FType" IN (390, 436)'''
    arcpy.Select_analysis(nhdwaterbody, csiwaterbody_10ha, lakes_10ha_filter)

    # Exclude intermittent flowlines, if requested
    if exclude_intermit_flowlines:
        flowline_where_clause = '''"FCode" NOT IN (46003,46007)'''
        nhdflowline = arcpy.Select_analysis(nhdflowline, nhdflowline_filtered, flowline_where_clause)

    # Make dangle points at the ends of nhdflowline
    DM.FeatureVerticesToPoints(nhdflowline, dangles, "DANGLE")
    DM.MakeFeatureLayer(dangles, "dangles_lyr")

    # Isolate start dangles from end dangles.
    DM.FeatureVerticesToPoints(nhdflowline, start, "START")
    DM.FeatureVerticesToPoints(nhdflowline, end, "END")
    DM.SelectLayerByLocation("dangles_lyr", "ARE_IDENTICAL_TO", start)
    DM.CopyFeatures("dangles_lyr", startdangles)
    DM.SelectLayerByLocation("dangles_lyr", "ARE_IDENTICAL_TO", end)
    DM.CopyFeatures("dangles_lyr", enddangles)

    # Special handling for lakes that have some intermittent flow in and some permanent
    if exclude_intermit_flowlines:
        DM.MakeFeatureLayer(nhdflowline, "nhdflowline_lyr")
        DM.SelectLayerByAttribute("nhdflowline_lyr", "NEW_SELECTION",
                                  '''"WBArea_Permanent_Identifier" is null''')
        DM.FeatureVerticesToPoints("nhdflowline_lyr", non_artificial_end, "END")
        DM.SelectLayerByAttribute("nhdflowline_lyr", "CLEAR_SELECTION")
    arcpy.AddMessage("Found source area nodes.")

    # Get junctions from lakes >= 10 hectares.
    DM.MakeFeatureLayer(nhdjunction, "junction_lyr")
    DM.SelectLayerByLocation("junction_lyr", "INTERSECT", csiwaterbody_10ha, XY_TOLERANCE,
                             "NEW_SELECTION")
    DM.CopyFeatures("junction_lyr", flags_10ha_lake_junctions)
    arcpy.AddMessage("Found lakes >= 10 ha.")

    # Make points at flowline vertices to act as potential flags and/or barriers.
    arcpy.AddMessage("Tracing...")
    DM.FeatureVerticesToPoints(nhdflowline, midvertices, "MID")
    DM.MakeFeatureLayer(midvertices, "midvertices_lyr")

    # Get vertices that are not coincident with 10-hectare lake junctions.
    DM.SelectLayerByLocation("midvertices_lyr", "INTERSECT", flags_10ha_lake_junctions, "",
                             "NEW_SELECTION")
    DM.SelectLayerByLocation("midvertices_lyr", "INTERSECT", flags_10ha_lake_junctions, "",
                             "SWITCH_SELECTION")
    DM.CopyFeatures("midvertices_lyr", non10vertices)

    # Get junctions that are not coincident with 10-hectare lake junctions.
    DM.SelectLayerByLocation("junction_lyr", "INTERSECT", flags_10ha_lake_junctions, "",
                             "NEW_SELECTION")
    DM.SelectLayerByLocation("junction_lyr", "INTERSECT", flags_10ha_lake_junctions, "",
                             "SWITCH_SELECTION")
    DM.CopyFeatures("junction_lyr", non10junctions)

    # Merge non10vertices with non10junctions
    DM.Merge([non10junctions, non10vertices], all_non_flag_points)  # both inputs are point FCs in_memory
    DM.MakeFeatureLayer(all_non_flag_points, "all_non_flag_points_lyr")

    # Test the counts...for some reason the merge doesn't give stable behavior.
    mid_n = int(DM.GetCount(non10vertices).getOutput(0))
    jxn_n = int(DM.GetCount(non10junctions).getOutput(0))
    merge_n = int(DM.GetCount(all_non_flag_points).getOutput(0))
    if merge_n < mid_n + jxn_n:
        arcpy.AddWarning("The total number of flags ({0}) is less than the sum of the input junctions ({1}) "
                         "and input midpoints ({2})".format(merge_n, jxn_n, mid_n))

    # For tracing barriers, select the all_non_flag_points that intersect a 10 ha lake.
    DM.SelectLayerByLocation("all_non_flag_points_lyr", "INTERSECT", csiwaterbody_10ha, XY_TOLERANCE,
                             "NEW_SELECTION")
    DM.CopyFeatures("all_non_flag_points_lyr", barriers)

    # Trace 1: trace downstream to the first barrier (junctions + midvertices in 10 ha lakes),
    # starting from the flags_10ha_lake_junctions flag points.
    DM.TraceGeometricNetwork(network, "trace1", flags_10ha_lake_junctions, "TRACE_DOWNSTREAM", barriers)

    # Save trace1 flowlines and junctions to layers on disk.
    DM.CopyFeatures("trace1\\HYDRO_NET_Junctions", trace1_junctions)  # extra for debugging
    DM.CopyFeatures("trace1\\NHDFlowline", trace1_flowline)

    # Select the vertex midpoints that intersect trace1 flowlines to act as new flags for trace2.
    DM.MakeFeatureLayer(non10vertices, "non10vertices_lyr")
    DM.SelectLayerByLocation("non10vertices_lyr", "INTERSECT", trace1_flowline, "", "NEW_SELECTION")

    # Trace 2: trace downstream from the midpoints of the flowlines selected in trace1.
    DM.TraceGeometricNetwork(network, "trace2", "non10vertices_lyr", "TRACE_DOWNSTREAM")

    # Save trace2 flowlines and junctions to layers and then shapes on disk.
    DM.CopyFeatures("trace2\\HYDRO_NET_Junctions", trace2_junctions)
    DM.CopyFeatures("trace2\\NHDFlowline", trace2_flowline)  # extra for debugging
    arcpy.AddMessage("Done tracing.")

    # Classify seepage lakes (the ones that don't intersect flowlines).
    if exclude_intermit_flowlines:
        class_field_name = "Lake_Connectivity_Permanent"
    else:
        class_field_name = "Lake_Connectivity_Class"
    DM.AddField(temp_fc, class_field_name, "TEXT", field_length=13)
    DM.MakeFeatureLayer(temp_fc, "out_fc_lyr")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", nhdflowline, XY_TOLERANCE, "NEW_SELECTION")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", nhdflowline, "", "SWITCH_SELECTION")
    DM.CalculateField("out_fc_lyr", class_field_name, """'Isolated'""", "PYTHON")

    # New type of "Isolated" classification, mostly for "permanent", but there were some oddballs
    # in "maximum" too.
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", startdangles, XY_TOLERANCE, "NEW_SELECTION")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", enddangles, XY_TOLERANCE, "SUBSET_SELECTION")
    DM.CalculateField("out_fc_lyr", class_field_name, """'Isolated'""", "PYTHON")

    # Get headwater lakes.
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", startdangles, XY_TOLERANCE, "NEW_SELECTION")
    DM.SelectLayerByAttribute("out_fc_lyr", "REMOVE_FROM_SELECTION",
                              '''"{}" = 'Isolated' '''.format(class_field_name))
    DM.CalculateField("out_fc_lyr", class_field_name, """'Headwater'""", "PYTHON")

    # Select waterbodies that intersect trace2 junctions.
    arcpy.AddMessage("Beginning connectivity attribution...")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", trace2_junctions, XY_TOLERANCE, "NEW_SELECTION")
    DM.CalculateField("out_fc_lyr", class_field_name, """'DrainageLk'""", "PYTHON")

    # Get stream drainage lakes: anything unassigned so far.
    DM.SelectLayerByAttribute("out_fc_lyr", "NEW_SELECTION",
                              '''"{}" IS NULL'''.format(class_field_name))
    DM.CalculateField("out_fc_lyr", class_field_name, """'Drainage'""", "PYTHON")

    if exclude_intermit_flowlines:
        # Also convert "Headwater" to "Drainage" if a permanent stream flows into the lake,
        # which is detected with non_artificial_end.
        DM.SelectLayerByAttribute("out_fc_lyr", "NEW_SELECTION",
                                  '''"{}" = 'Headwater' '''.format(class_field_name))
        DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", non_artificial_end, XY_TOLERANCE,
                                 "SUBSET_SELECTION")
        DM.CalculateField("out_fc_lyr", class_field_name, """'Drainage'""", "PYTHON")

        # Prevent 'upgrades' due to very odd flow situations and artifacts of bad digitization.
        # The effects of these are varied--to avoid confusion, just keep the class assigned with
        # all flowlines.

        # 1--Purely hypothetical, not seen in testing
        DM.SelectLayerByAttribute("out_fc_lyr", "NEW_SELECTION",
                                  '''"Lake_Connectivity_Class" = 'Isolated' AND '''
                                  '''"Lake_Connectivity_Permanent" <> 'Isolated' ''')
        DM.CalculateField("out_fc_lyr", class_field_name, """'Isolated'""", "PYTHON")

        # 2--Headwater to Drainage upgrade, seen in testing with an odd multi-inlet flow situation
        DM.SelectLayerByAttribute("out_fc_lyr", "NEW_SELECTION",
                                  '''"Lake_Connectivity_Class" = 'Headwater' AND '''
                                  '''"Lake_Connectivity_Permanent" IN ('Drainage', 'DrainageLk') ''')
        DM.CalculateField("out_fc_lyr", class_field_name, """'Headwater'""", "PYTHON")

        # 3--Drainage to DrainageLk upgrade, seen in testing when intermittent stream segments were
        # used erroneously instead of artificial paths
        DM.SelectLayerByAttribute("out_fc_lyr", "NEW_SELECTION",
                                  '''"Lake_Connectivity_Class" = 'Drainage' AND '''
                                  '''"Lake_Connectivity_Permanent" = 'DrainageLk' ''')
        DM.CalculateField("out_fc_lyr", class_field_name, """'Drainage'""", "PYTHON")
        DM.SelectLayerByAttribute("out_fc_lyr", "CLEAR_SELECTION")

        # Add a change flag for users
        DM.AddField(temp_fc, "Lake_Connectivity_Fluctuates", "TEXT", field_length=1)
        flag_codeblock = """def flag_calculate(arg1, arg2):
    if arg1 == arg2:
        return 'N'
    else:
        return 'Y'"""
        expression = 'flag_calculate(!Lake_Connectivity_Class!, !Lake_Connectivity_Permanent!)'
        DM.CalculateField(temp_fc, "Lake_Connectivity_Fluctuates", expression, "PYTHON", flag_codeblock)

    # Project the output once done with both passes. Switching the CRS earlier causes trace problems.
    if not exclude_intermit_flowlines:
        DM.CopyFeatures(temp_fc, out_feature_class)
    else:
        DM.Project(temp_fc, out_feature_class, arcpy.SpatialReference(102039))

    # Clean up
    if not debug_mode:
        for item in this_tool_layers + this_tool_temp:
            if arcpy.Exists(item):
                DM.Delete(item)
        DM.Delete("trace1")
        DM.Delete("trace2")
    arcpy.AddMessage("{} classification is complete.".format(class_field_name))
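
# Typical two-pass call (sketch; paths are hypothetical). The second pass copies the first
# pass's output into temp_fc, so it must run after the full-network pass, against the same
# output feature class:
##  classify_lakes(r'D:\NHDH0411.gdb', r'D:\out.gdb\lakes_conn')       # adds Lake_Connectivity_Class
##  classify_lakes(r'D:\NHDH0411.gdb', r'D:\out.gdb\lakes_conn',
##                 exclude_intermit_flowlines=True)                    # adds _Permanent and _Fluctuates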
def stats_area_table(zone_fc=zone_fc, zone_field=zone_field, in_value_raster=in_value_raster,
                     out_table=out_table, is_thematic=is_thematic):
    def refine_zonal_output(t):
        """Makes a nicer output for this tool. Rename some fields, drop unwanted ones,
        calculate percentages using raster AREA before deleting that field."""
        if is_thematic:
            value_fields = arcpy.ListFields(t, "VALUE*")
            # VALUE_41_pct, etc. A field name can't start with a number.
            pct_fields = ['{}_pct'.format(f.name) for f in value_fields]

            # add all the new fields needed
            for f, pct_field in zip(value_fields, pct_fields):
                arcpy.AddField_management(t, pct_field, f.type)

            # calculate the percents
            cursor_fields = ['AREA'] + [f.name for f in value_fields] + pct_fields
            with arcpy.da.UpdateCursor(t, cursor_fields) as uCursor:
                for uRow in uCursor:
                    # unpack AREA plus one slice for the value fields and one for the pct fields,
                    # no matter how many there are
                    vf_i_end = len(value_fields) + 1
                    pf_i_end = vf_i_end + len(pct_fields)

                    # pct_values are all null at this point, but unpack them for clarity
                    area, value_values, pct_values = uRow[0], uRow[1:vf_i_end], uRow[vf_i_end:pf_i_end]
                    new_pct_values = [100 * vv / area for vv in value_values]
                    new_row = [area] + value_values + new_pct_values
                    uCursor.updateRow(new_row)

            for vf in value_fields:
                arcpy.DeleteField_management(t, vf.name)

        arcpy.AlterField_management(t, 'COUNT', 'CELL_COUNT')
        drop_fields = ['ZONE_CODE', 'COUNT', 'AREA']
        if not debug_mode:
            for df in drop_fields:
                try:
                    arcpy.DeleteField_management(t, df)
                except arcpy.ExecuteError:
                    continue

    # Set up environments for alignment between the zone raster and the theme raster
    if isinstance(zone_fc, arcpy.Result):
        zone_fc = zone_fc.getOutput(0)
    this_files_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(this_files_dir)
    common_grid = os.path.abspath('../common_grid.tif')
    env.snapRaster = common_grid
    env.cellSize = common_grid
    env.extent = zone_fc

    zone_desc = arcpy.Describe(zone_fc)
    zone_raster = 'convertraster'
    if zone_desc.dataType not in ['RasterDataset', 'RasterLayer']:
        zone_raster = arcpy.PolygonToRaster_conversion(zone_fc, zone_field, zone_raster,
                                                       'CELL_CENTER', cellsize=env.cellSize)
        print('cell size is {}'.format(env.cellSize))
        zone_size = int(env.cellSize)
    else:
        zone_raster = zone_fc
        zone_size = min(arcpy.Describe(zone_raster).meanCellHeight,
                        arcpy.Describe(zone_raster).meanCellWidth)
        raster_size = min(arcpy.Describe(in_value_raster).meanCellHeight,
                          arcpy.Describe(in_value_raster).meanCellWidth)
        env.cellSize = min([zone_size, raster_size])
        print('cell size is {}'.format(env.cellSize))

    # I tested, and there is no need to resample the raster being summarized. It will be resampled
    # correctly internally by the following tools, given the environments set above (cell size, snap).
    # in_value_raster = arcpy.Resample_management(in_value_raster, 'in_value_raster_resampled', CELL_SIZE)

    if not is_thematic:
        arcpy.AddMessage("Calculating Zonal Statistics...")
        temp_entire_table = arcpy.sa.ZonalStatisticsAsTable(zone_raster, zone_field, in_value_raster,
                                                            'temp_zonal_table', 'DATA', 'MEAN')

    if is_thematic:
        # pass the cell size explicitly; for some reason env.cellSize doesn't work here
        arcpy.AddMessage("Tabulating areas...")
        temp_entire_table = arcpy.sa.TabulateArea(zone_raster, zone_field, in_value_raster, 'Value',
                                                  'temp_area_table',
                                                  processing_cell_size=env.cellSize)

        # TabulateArea capitalizes the zone field for some annoying reason, and ArcGIS is
        # case-insensitive to field names, so we have this work-around:
        zone_field_t = '{}_t'.format(zone_field)
        DM.AddField(temp_entire_table, zone_field_t, 'TEXT', field_length=20)
        expr = '!{}!'.format(zone_field.upper())
        DM.CalculateField(temp_entire_table, zone_field_t, expr, 'PYTHON')
        DM.DeleteField(temp_entire_table, zone_field.upper())
        DM.AlterField(temp_entire_table, zone_field_t, zone_field, clear_field_alias=True)

        # Replaces the join to Zonal Stats in previous versions of this tool:
        # no joining, just calculate the area/count from what TabulateArea produces.
        arcpy.AddField_management(temp_entire_table, 'AREA', 'DOUBLE')
        arcpy.AddField_management(temp_entire_table, 'COUNT', 'DOUBLE')

        cursor_fields = ['AREA', 'COUNT']
        value_fields = [f.name for f in arcpy.ListFields(temp_entire_table, 'VALUE*')]
        cursor_fields.extend(value_fields)
        with arcpy.da.UpdateCursor(temp_entire_table, cursor_fields) as uCursor:
            for uRow in uCursor:
                area, count, value_values = uRow[0], uRow[1], uRow[2:]
                area = sum(value_values)
                count = round(area / (int(env.cellSize) * int(env.cellSize)), 0)
                new_row = [area, count] + value_values
                uCursor.updateRow(new_row)

    arcpy.AddMessage("Refining output table...")
    arcpy.AddField_management(temp_entire_table, 'datacoveragepct', 'DOUBLE')
    arcpy.AddField_management(temp_entire_table, 'ORIGINAL_COUNT', 'LONG')

    # Calculate datacoveragepct by comparing to the original areas in the zone raster.
    # This is an alternative to using JoinField, which is prohibitively slow if the zones exceed
    # the hu12 count.
    zone_raster_dict = {row[0]: row[1]
                        for row in arcpy.da.SearchCursor(zone_raster, [zone_field, 'Count'])}
    temp_entire_table_dict = {row[0]: row[1]
                              for row in arcpy.da.SearchCursor(temp_entire_table,
                                                               [zone_field, 'COUNT'])}

    sum_cell_area = float(env.cellSize) * float(env.cellSize)
    orig_cell_area = zone_size * zone_size

    with arcpy.da.UpdateCursor(temp_entire_table,
                               [zone_field, 'datacoveragepct', 'ORIGINAL_COUNT']) as cursor:
        for uRow in cursor:
            key_value, data_pct, count_orig = uRow
            count_orig = zone_raster_dict[key_value]
            if key_value in temp_entire_table_dict:
                count_summarized = temp_entire_table_dict[key_value]
                data_pct = 100 * float((count_summarized * sum_cell_area) / (count_orig * orig_cell_area))
            else:
                data_pct = None
            cursor.updateRow((key_value, data_pct, count_orig))

    # Refine the output
    refine_zonal_output(temp_entire_table)

    # In order to add vector capabilities back, need to do something with this.
    # Right now we just can't fill in polygon zones that didn't convert to raster in our system.
    stats_result = cu.one_in_one_out(temp_entire_table, zone_fc, zone_field, out_table)

    # Convert "datacoveragepct" and "ORIGINAL_COUNT" values to 0 for zones with no metrics calculated
    with arcpy.da.UpdateCursor(out_table,
                               [zone_field, 'datacoveragepct', 'ORIGINAL_COUNT',
                                'CELL_COUNT']) as u_cursor:
        for row in u_cursor:
            # datacoveragepct set to 0
            if row[1] is None:
                row[1] = 0
            # original count filled in if a) the zone is outside the raster bounds or
            # b) the zone is too small to be rasterized
            if row[2] is None:
                if row[0] in zone_raster_dict:
                    row[2] = zone_raster_dict[row[0]]
                else:
                    row[2] = 0
            # cell count set to 0
            if row[3] is None:
                row[3] = 0
            u_cursor.updateRow(row)

    # count whether all zones got an output record or not
    out_count = int(arcpy.GetCount_management(temp_entire_table).getOutput(0))
    in_count = int(arcpy.GetCount_management(zone_fc).getOutput(0))
    count_diff = in_count - out_count

    # cleanup
    if not debug_mode:
        for item in ['temp_zonal_table', temp_entire_table, 'convertraster']:  # don't add zone_raster, orig
            arcpy.Delete_management(item)
    arcpy.ResetEnvironments()
    env.workspace = orig_env  # hope this prevents problems using list of FCs from workspace as batch
    arcpy.CheckInExtension("Spatial")

    return [stats_result, count_diff]
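
# How datacoveragepct behaves (made-up numbers): if a zone originally rasterized to 1000
# cells at 30 m (orig_cell_area = 900 m2 each), but only 900 cells at the 30 m summary
# cell size had data, then datacoveragepct = 100 * (900 * 900) / (1000 * 900) = 90.0.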