def process_ws(ws_fc, zone_name):
    # generate new zone ids
    DM.AddField(ws_fc, 'zoneid', 'TEXT', field_length=10)
    DM.CalculateField(ws_fc, 'zoneid', '!lagoslakeid!', 'PYTHON')
    ws_fc_lyr = DM.MakeFeatureLayer(ws_fc)

    # multipart flag
    DM.AddField(ws_fc, 'ismultipart', 'TEXT', field_length=2)
    with arcpy.da.UpdateCursor(ws_fc, ['ismultipart', 'SHAPE@']) as u_cursor:
        for row in u_cursor:
            if row[1].isMultipart:
                row[0] = 'Y'
            else:
                row[0] = 'N'
            u_cursor.updateRow(row)

    print("Edge flags...")
    # add flag fields
    DM.AddField(ws_fc, 'onlandborder', 'TEXT', field_length=2)
    DM.AddField(ws_fc, 'oncoast', 'TEXT', field_length=2)

    # identify border zones
    border_lyr = DM.MakeFeatureLayer(LAND_BORDER, 'border_lyr')
    DM.SelectLayerByLocation(ws_fc_lyr, 'INTERSECT', border_lyr)
    DM.CalculateField(ws_fc_lyr, 'onlandborder', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(ws_fc_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(ws_fc_lyr, 'onlandborder', "'N'", 'PYTHON')

    # identify coastal zones
    coastal_lyr = DM.MakeFeatureLayer(COASTLINE, 'coastal_lyr')
    DM.SelectLayerByLocation(ws_fc_lyr, 'INTERSECT', coastal_lyr)
    DM.CalculateField(ws_fc_lyr, 'oncoast', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(ws_fc_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(ws_fc_lyr, 'oncoast', "'N'", 'PYTHON')

    print("State assignment...")
    # States (this local path is retained from the original but unused; find_states uses STATES_GEO)
    state_geo = r'D:\Continental_Limnology\Data_Working\LAGOS_US_GIS_Data_v0.6.gdb\Spatial_Classifications\state'
    find_states(ws_fc, STATES_GEO)

    # glaciation status?
    calc_glaciation(ws_fc, 'zoneid')

    # preface the field names with the zone name
    DM.DeleteField(ws_fc, 'ORIG_FID')
    fields = [f.name for f in arcpy.ListFields(ws_fc, '*')
              if f.type not in ('OID', 'Geometry') and not f.name.startswith('Shape_')]
    for f in fields:
        new_fname = '{zn}_{orig}'.format(zn=zone_name, orig=f).lower()
        try:
            # skip required fields rather than debug the required-field message;
            # we don't want to change required fields anyway
            DM.AlterField(ws_fc, f, new_fname, clear_field_alias='TRUE')
        except:
            pass

    # cleanup
    lyr_objects = [lyr_object for var_name, lyr_object in locals().items()
                   if var_name.endswith('lyr')]
    for l in lyr_objects:
        DM.Delete(l)
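# Hypothetical usage sketch for process_ws(). The path, the module aliases, and
# the requirement that LAND_BORDER, COASTLINE, STATES_GEO, find_states(), and
# calc_glaciation() exist at module level are assumptions, not from the original.
import arcpy
from arcpy import management as DM

ws_fc = r'C:\temp\lagos.gdb\ws'   # hypothetical watershed polygon feature class
process_ws(ws_fc, 'ws')           # flags edges, assigns states, prefixes fields with 'ws_'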
#desc = arcpy.Describe(polygon_file)
#sr = desc.spatialReference
arcpy.env.outputCoordinateSystem = default_coord_sys
arcpy.env.geographicTransformations = "GDA_1994_To_WGS_1984"
arcpy.env.XYResolution = "0.0000000001 Meters"
arcpy.env.XYTolerance = "0.0000000001 Meters"

temp_xy = arcpy.CreateScratchName("xx", ".shp")
add_msg_and_print("temp_xy is %s" % temp_xy)

try:
    # field names below are kept exactly as they exist in the source table
    # (including the 'Lattitude' spelling)
    arcmgt.XYToLine(in_table, temp_xy,
                    "Longitude_dd", "Lattitude_dd",
                    "Longitude_dd_2", "Lattitude_dd_2",
                    "GEODESIC", "New_WP")
except:
    add_msg_and_print("Unable to create XY to line feature class")
    raise

layer = "feat_layer"
arcmgt.MakeFeatureLayer(temp_xy, layer)
arcmgt.SelectLayerByLocation(layer, "COMPLETELY_WITHIN", polygon_file)
arcmgt.SelectLayerByAttribute(layer, "SWITCH_SELECTION")

temp_overlap = arcpy.CreateScratchName("xx_overlap_", ".shp")
arcpy.CopyFeatures_management(layer, temp_overlap)

# now we need to iterate over those overlapping vertices and integrate them
# with the boundary polygon

print("Completed")
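# A minimal sketch of the step the comment above promises, assuming the intent
# is to pull the boundary polygon's vertices onto the out-of-polygon line work.
# Snap_edit modifies polygon_file in place; the "100 Meters" tolerance is an
# assumption, not a value from the original script.
arcpy.Snap_edit(polygon_file, [[temp_overlap, "VERTEX", "100 Meters"]])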
def lake_from_to(nhd_subregion_gdb, output_table):
    arcpy.env.workspace = 'in_memory'
    waterbody0 = os.path.join(nhd_subregion_gdb, 'NHDWaterbody')
    network = os.path.join(nhd_subregion_gdb, 'Hydrography', 'HYDRO_NET')
    junctions0 = os.path.join(nhd_subregion_gdb, 'HYDRO_NET_Junctions')

    # use layers for selections. We will only work with lakes over 1 hectare for this tool.
    waterbody = DM.MakeFeatureLayer(waterbody0, 'waterbody', where_clause=LAGOS_LAKE_FILTER)
    num_wbs = int(arcpy.GetCount_management(waterbody).getOutput(0))
    junctions = DM.MakeFeatureLayer(junctions0, 'junctions')
    DM.SelectLayerByLocation(junctions, 'INTERSECT', waterbody, '1 Meters', 'NEW_SELECTION')
    junctions_1ha = DM.MakeFeatureLayer(junctions, 'junctions_1ha')

    # create the output table
    DM.CreateTable(os.path.dirname(output_table), os.path.basename(output_table))
    DM.AddField(output_table, 'FROM_PERMANENT_ID', 'TEXT', field_length=40)
    DM.AddField(output_table, 'TO_PERMANENT_ID', 'TEXT', field_length=40)

    # hold results in memory until the end
    results = []
    counter = 0
    progress = .01
    arcpy.AddMessage("Starting network tracing...")
    with arcpy.da.SearchCursor(waterbody, 'Permanent_Identifier') as cursor:
        for row in cursor:
            # progress printer
            counter += 1
            if counter >= float(num_wbs) * progress:
                progress += .01
                arcpy.AddMessage("{}% complete...".format(round(progress * 100, 1)))

            # select this lake
            id = row[0]
            where_clause = """"{0}" = '{1}'""".format('Permanent_Identifier', id)
            this_waterbody = DM.MakeFeatureLayer(waterbody, 'this_waterbody', where_clause)

            # select junctions overlapping this lake; only the downstream one matters,
            # the rest have no effect on the trace
            DM.SelectLayerByLocation(junctions_1ha, 'INTERSECT', this_waterbody, '1 Meters')
            count_junctions = int(arcpy.GetCount_management(junctions_1ha).getOutput(0))
            if count_junctions == 0:
                # add a row with no "TO" lake to the results
                results.append({'FROM': id, 'TO': None})
            else:
                # copy with the selection on
                this_junctions = DM.MakeFeatureLayer(junctions_1ha, 'this_junctions')
                DM.TraceGeometricNetwork(network, 'downstream', this_junctions,
                                         'TRACE_DOWNSTREAM')

                # select lakes that intersect the downstream network, with a tolerance of 1 meter
                DM.SelectLayerByLocation(waterbody, 'INTERSECT', 'downstream/NHDFlowline',
                                         '1 Meters', 'NEW_SELECTION')

                # remove this lake itself from the selection
                DM.SelectLayerByAttribute(waterbody, 'REMOVE_FROM_SELECTION', where_clause)

                # if the remaining count is 0, there is no downstream lake to record
                count_waterbody = int(arcpy.GetCount_management(waterbody).getOutput(0))
                if count_waterbody == 0:
                    # add a row with no "TO" lake to the results
                    results.append({'FROM': id, 'TO': None})
                else:
                    # record one row per downstream lake id
                    to_ids = [r[0] for r in arcpy.da.SearchCursor(waterbody,
                                                                  'Permanent_Identifier')]
                    for to_id in to_ids:
                        results.append({'FROM': id, 'TO': to_id})

                # delete the trace intermediates (only created in this branch)
                for item in [this_junctions, 'downstream']:
                    DM.Delete(item)

            DM.SelectLayerByAttribute(waterbody, 'CLEAR_SELECTION')
            DM.Delete(this_waterbody)

    # insert the results into the output table
    insert_cursor = arcpy.da.InsertCursor(output_table,
                                          ['FROM_PERMANENT_ID', 'TO_PERMANENT_ID'])
    for result in results:
        insert_cursor.insertRow([result['FROM'], result['TO']])
    del insert_cursor

    # delete everything
    for item in [waterbody, junctions, junctions_1ha, 'in_memory']:
        DM.Delete(item)
    arcpy.AddMessage("Completed.")
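# Hypothetical usage sketch: lake_from_to() expects an NHD subregion file
# geodatabase with HYDRO_NET built, plus module-level DM = arcpy.management and
# a LAGOS_LAKE_FILTER where clause. The paths and filter below are invented.
import os
import arcpy
from arcpy import management as DM

LAGOS_LAKE_FILTER = "AreaSqKm >= 0.01"   # assumed 1-ha filter, not the original constant
lake_from_to(r'C:\data\NHD_H_0405_GDB.gdb', r'C:\data\results.gdb\lake_from_to_0405')
# output rows: FROM_PERMANENT_ID -> TO_PERMANENT_ID (a NULL TO means a terminal lake)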
def georeference_lakes(lake_points_fc, out_fc, lake_id_field, lake_name_field,
                       lake_county_field='', state='',
                       master_gdb=r'C:\Users\smithn78\Dropbox\CL_HUB_GEO\Lake_Georeferencing\Masters_for_georef.gdb'):
    """
    Evaluate water quality sampling point locations and either assign the point
    to a lake polygon or flag the point for manual review.
    :param lake_points_fc: Feature class of sampling points to georeference
    :param out_fc: Output feature class to create
    :param lake_id_field: Field containing the source program's lake ID
    :param lake_name_field: Field containing the lake name
    :param lake_county_field: (Optional) field containing the county name
    :param state: 2-letter state code abbreviation
    :param master_gdb: Location of master geodatabase used for linking
    :return:
    """
    master_lakes_fc = os.path.join(master_gdb, MASTER_LAKES_FC)
    master_lakes_lines = os.path.join(master_gdb, MASTER_LAKES_LINES)
    master_streams_fc = os.path.join(master_gdb, MASTER_STREAMS_FC)
    master_xwalk = os.path.join(master_gdb, MASTER_XWALK)

    # setup
    arcpy.AddMessage("Joining...")
    state = state.upper()
    if state not in STATES:
        raise ValueError('Use the 2-letter state code abbreviation')
    arcpy.env.workspace = 'in_memory'
    out_short = os.path.splitext(os.path.basename(out_fc))[0]
    join1 = '{}_1'.format(out_short)
    join2 = '{}_2'.format(out_short)
    join3 = '{}_3'.format(out_short)
    join3_select = join3 + '_select'
    join4 = '{}_4'.format(out_short)
    join5 = '{}_5'.format(out_short)
    joinx = '{}_x'.format(out_short)

    if lake_county_field:
        county_fields = arcpy.ListFields(lake_points_fc, '{}*'.format(lake_county_field))
        if not county_fields or lake_county_field not in county_fields[0].name:
            raise Exception('{} field does not exist in dataset.'.format(lake_county_field))

    point_fields = [f.name for f in arcpy.ListFields(lake_points_fc)]

    # convert the lake id to a text field if it isn't one already
    lake_id_field_type = arcpy.ListFields(lake_points_fc, lake_id_field)[0].type
    if lake_id_field_type != 'String':
        temp_id_field = '{}_t'.format(lake_id_field)
        arcpy.AddField_management(lake_points_fc, temp_id_field, 'TEXT', field_length=255)
        expr = '!{}!'.format(lake_id_field)
        arcpy.CalculateField_management(lake_points_fc, temp_id_field, expr, 'PYTHON')
        arcpy.DeleteField_management(lake_points_fc, lake_id_field)
        arcpy.AlterField_management(lake_points_fc, temp_id_field,
                                    new_field_name=lake_id_field)

    # Try to make some spatial connections and fulfill some logic to assign a link
    join1 = AN.SpatialJoin(lake_points_fc, master_lakes_fc, join1, 'JOIN_ONE_TO_MANY',
                           'KEEP_ALL', match_option='INTERSECT')
    join2 = AN.SpatialJoin(join1, master_streams_fc, join2, 'JOIN_ONE_TO_MANY',
                           'KEEP_ALL', match_option='INTERSECT')
    join3 = AN.SpatialJoin(join2, master_lakes_fc, join3, 'JOIN_ONE_TO_MANY',
                           'KEEP_ALL', match_option='INTERSECT', search_radius='10 meters')
    join4 = AN.SpatialJoin(join3, master_lakes_fc, join4, 'JOIN_ONE_TO_MANY',
                           'KEEP_ALL', match_option='INTERSECT', search_radius='100 meters')

    # setup for editing lake assignment values
    DM.AddField(join4, 'Auto_Comment', 'TEXT', field_length=100)
    DM.AddField(join4, 'Manual_Review', 'SHORT')
    DM.AddField(join4, 'Shared_Words', 'TEXT', field_length=100)
    DM.AddField(join4, 'Linked_lagoslakeid', 'LONG')
    DM.AddField(join4, 'GEO_Discovered_Name', 'TEXT', field_length=255)
    DM.AddField(join4, 'Duplicate_Candidate', 'TEXT', field_length=1)
    DM.AddField(join4, 'Is_Legacy_Link', 'TEXT', field_length=1)

    update_fields = [lake_id_field, lake_name_field,
                     MASTER_LAKE_ID, MASTER_GNIS_NAME,                      # 0m match
                     'PERMANENT_IDENTIFIER_1', 'GNIS_NAME_1',               # stream match
                     MASTER_LAKE_ID + '_1', MASTER_GNIS_NAME + '_12',       # 10m match
                     MASTER_LAKE_ID + '_12', MASTER_GNIS_NAME + '_12_13',   # 100m match
                     'Auto_Comment', 'Manual_Review', 'Shared_Words',
                     'Linked_lagoslakeid']

    # use a cursor to go through each point and evaluate its assignment
    cursor = arcpy.da.UpdateCursor(join4, update_fields)
    arcpy.AddMessage("Calculating link status...")
    for row in cursor:
        id, name, mid_0, mname_0, stream_id, streamname_0, mid_10, mname_10, \
            mid_100, mname_100, comment, review, words, lagosid = row
        if mid_0 is not None:  # the point falls directly in a lake polygon
            if name and mname_0:
                words = lagosGIS.list_shared_words(name, mname_0, exclude_lake_words=False)
            comment = 'Exact location link'
            lagosid = mid_0
            review = -1
        elif mid_0 is None and mid_10 is not None:  # the point is only within 10m of a lake
            if name and mname_10:
                words = lagosGIS.list_shared_words(name, mname_10, exclude_lake_words=False)
            if words:
                comment = 'Linked by common name and location'
                lagosid = mid_10
                review = -1
            else:
                comment = 'Linked by common location'
                lagosid = mid_10
                review = 1
        elif mid_0 is None and mid_10 is None:
            if stream_id is not None:  # there is a stream match
                comment = 'Not linked because represented as river in NHD'
                review = 2
            else:
                if mid_100 is not None:  # the point is only within 100m of lake(s)
                    if name and mname_100:
                        words = lagosGIS.list_shared_words(name, mname_100,
                                                           exclude_lake_words=True)
                    # TODO: Frequency check
                    if words:
                        comment = 'Linked by common name and location'
                        lagosid = mid_100
                        review = 1
                    else:
                        comment = 'Linked by common location'
                        lagosid = mid_100
                        review = 2
        cursor.updateRow((id, name, mid_0, mname_0, stream_id, streamname_0, mid_10,
                          mname_10, mid_100, mname_100, comment, review, words, lagosid))

    # # So I haven't been able to get the county logic to work and it hasn't been
    # # that important yet, ignore for now.
    # # Select down to a minimum set because we're about to join on county, which will
    # # create lots of duplicate matches. Then join calculated results back to full set.
    # if lake_county_field:
    #     join5 = AN.Select(join4, join5, 'Manual_Review IS NULL')
    #     lakes_state = AN.Select(MASTER_LAKES_FC, 'lakes_state',
    #                             "{0} = '{1}'".format(MASTER_STATE_NAME, state))
    #     lakes_state_lyr = DM.MakeFeatureLayer(lakes_state, 'lakes_state_lyr')
    #     join5_lyr = DM.MakeFeatureLayer(join5, 'join5_lyr')
    #     DM.AddJoin(join5_lyr, lake_county_field, lakes_state_lyr, MASTER_COUNTY_NAME)
    #     join5_with_county = DM.CopyFeatures(join5_lyr, 'join5_with_cty')
    #     j5 = 'DEDUPED_CA_SWAMP_data_linked_5.'
    #
    #     county_update_fields = [j5 + lake_id_field, j5 + lake_name_field, j5 + lake_county_field,
    #                             'lakes_state.' + MASTER_LAKE_ID, 'lakes_state.' + MASTER_GNIS_NAME,
    #                             'lakes_state.' + MASTER_COUNTY_NAME,
    #                             j5 + 'Auto_Comment', j5 + 'Manual_Review', j5 + 'Shared_Words',
    #                             j5 + 'Linked_lagoslakeid']
    #     with arcpy.da.UpdateCursor(join5_lyr, county_update_fields) as cursor:
    #         for row in cursor:
    #             id, name, county, mid_cty, mname_cty, mcounty, comment, review, words, lagosid = row
    #             if county is not None and mcounty is not None:
    #                 if name and mname_cty:
    #                     words = lagosGIS.list_shared_words(name, mname_cty, exclude_lake_words=True)
    #                 if words:
    #                     comment = 'PRELIMINARY: Linked by common name and location'
    #                     lagosid = mid_cty
    #                     review = 2
    #             cursor.updateRow((id, name, county, mid_cty, mname_cty, mcounty,
    #                               comment, review, words, lagosid))
    #     DM.RemoveJoin(join5_lyr)
    #     join5_with_county = DM.CopyFeatures(join5_lyr, 'join5_with_county')
    #
    #     join5 = DM.JoinField(join5, lake_county_field, lakes_state, MASTER_COUNTY_NAME,
    #                          fields=[MASTER_COUNTY_NAME, MASTER_LAKE_ID, MASTER_GNIS_NAME])
    #
    #     # This is a long way to make a join
    #     join_dict = {}
    #     with arcpy.da.SearchCursor(lakes_state, [MASTER_COUNTY_NAME, MASTER_LAKE_ID,
    #                                              MASTER_GNIS_NAME]) as cursor:
    #         for row in cursor:
    #             join_value, val1, val2 = row
    #             join_dict[join_value] = [val1, val2]
    #
    #     arcpy.AddField_management(join5, MASTER_LAKE_ID + 'cntyj', 'LONG')
    #     arcpy.AddField_management(join5, MASTER_GNIS_NAME + 'cntyj', 'TEXT', 255)
    #
    #     with arcpy.da.SearchCursor(join5, [lake_county_field, MASTER_LAKE_ID + 'cntyj',
    #                                        MASTER_GNIS_NAME + 'cntyj']) as cursor:
    #         for row in cursor:
    #             key_value = row[0]
    #             words = lagosGIS.list_shared_words()
    #             if join_dict.has_key(key_value):
    #                 row[1] = join_dict[key_value][0]
    #                 row[2] = join_dict[key_value][1]
    #             else:
    #                 row[1] = None
    #                 row[2] = None
    #             cursor.updateRow(row)
    #
    #     county_update_fields = [lake_id_field, lake_name_field, lake_county_field,
    #                             MASTER_LAKE_ID + '_12_13_14', MASTER_GNIS_NAME + '_12_13',
    #                             MASTER_COUNTY_NAME + '_12_13',  # county
    #                             'Auto_Comment', 'Manual_Review', 'Shared_Words',
    #                             'Linked_lagoslakeid']
    #     cursor = arcpy.da.UpdateCursor(join5, county_update_fields)
    #     for row in cursor:
    #         id, name, county, lagosid_cty, lagosname_cty, mcounty, comment, mreview, words, linked_lagosid = row
    #         if mcounty is not None:
    #             words = lagosGIS.list_shared_words()
    # else:
    #     join5 = join4

    if state in LAGOSNE_STATES:
        DM.JoinField(join4, lake_id_field, master_xwalk, 'lagosne_legacyid',
                     ['lagoslakeid', 'lagos_lakename', 'lagos_state'])
        update_fields = [lake_id_field, lake_name_field,
                         MASTER_LAKE_ID + '_12_13', 'lagos_lakename', 'lagos_state',  # crosswalk match
                         'Auto_Comment', 'Manual_Review', 'Shared_Words',
                         'Linked_lagoslakeid', 'Is_Legacy_Link']

        with arcpy.da.UpdateCursor(join4, update_fields) as uCursor:
            for uRow in uCursor:
                id, name, mid_x, mname_x, state_x, comment, review, words, lagosid, legacy_flag = uRow
                # fields are populated already from the links above; revise only if legacy links
                if mid_x is not None:
                    if state == state_x:
                        legacy_flag = 'Y'  # set to Y regardless of whether using legacy comment, if state matches
                    if comment != 'Exact location link':
                        review = 1
                        if state != state_x:
                            review = 3  # downgrade if states mismatch--border lakes OK, random common IDs NOT. Check.
                            legacy_flag = 'Y'
                        comment = 'LAGOS-NE legacy link'  # only comment non-exact location matches
                        lagosid = mid_x
                        if name and mname_x:
                            words = lagosGIS.list_shared_words(name, mname_x)  # update words only if legacy comment
                new_row = id, name, mid_x, mname_x, state_x, comment, review, words, lagosid, legacy_flag
                uCursor.updateRow(new_row)

    # Undo the next line if you ever bring the county chunk back.
    join5 = join4

    # Then re-code the no matches as a 3 and copy comments to the editable field.
    # Compress the joined lake ids into one field; having two fields lets us keep
    # track of how many of the auto matches are bad.
    if arcpy.ListFields(join5, 'Comment'):
        comment_field_name = 'Comment_LAGOS'
    else:
        comment_field_name = 'Comment'

    DM.AddField(join5, comment_field_name, 'TEXT', field_length=100)
    with arcpy.da.UpdateCursor(join5, ['Manual_Review', 'Auto_Comment',
                                       comment_field_name]) as cursor:
        for flag, ac, comment in cursor:
            if flag is None:
                flag = 3
                ac = 'Not linked'
            comment = ac
            cursor.updateRow((flag, ac, comment))

    # Re-code points more than 100m into the polygon of the lake as no need to check
    DM.MakeFeatureLayer(join5, 'join5_lyr')
    DM.MakeFeatureLayer(master_lakes_lines, 'lake_lines_lyr')
    DM.SelectLayerByAttribute('join5_lyr', 'NEW_SELECTION',
                              "Auto_Comment = 'Exact location link'")
    DM.SelectLayerByLocation('join5_lyr', 'INTERSECT', 'lake_lines_lyr', '100 meters',
                             'SUBSET_SELECTION', 'INVERT')
    DM.CalculateField('join5_lyr', 'Manual_Review', '-2', 'PYTHON')
    DM.Delete('join5_lyr')
    DM.Delete('lake_lines_lyr')

    # Then make sure to only keep the fields necessary when you write to an output
    copy_fields = point_fields + ['Linked_lagoslakeid', 'Auto_Comment', 'Manual_Review',
                                  'Is_Legacy_Link', 'Shared_Words', 'Comment',
                                  'Duplicate_Candidate', 'GEO_Discovered_Name']
    copy_fields.remove('Shape')
    copy_fields.remove('OBJECTID')
    lagosGIS.select_fields(join5, out_fc, copy_fields)

    DM.AssignDomainToField(out_fc, 'Comment', 'Comment')
    DM.AddField(out_fc, 'Total_points_in_lake_poly', 'SHORT')

    # Remove any duplicates. (These originate from the join3/join4 transition because
    # a point can be both within 10m and 100m of lakes; this code takes the closest
    # lake as true for my current sanity. Or, in other words, this is a hack solution.)
    out_fc_fields = [f.name for f in arcpy.ListFields(out_fc) if f.name != 'OBJECTID']
    DM.DeleteIdentical(out_fc, out_fc_fields)

    # Get the join_count for each limno lake ID.
    # De-dupe anything resulting from limno ID duplicates first before counting.
    id_pairs = list(set(arcpy.da.SearchCursor(out_fc, [lake_id_field,
                                                       'Linked_lagoslakeid'])))
    # THEN pull out the LAGOS id. Any duplicates now are only due to multiple
    # distinct points within a lake.
    lagos_ids = [ids[1] for ids in id_pairs]
    sample_ids = [ids[0] for ids in id_pairs]
    lagos_lake_counts = Counter(lagos_ids)
    linked_multiple_lake_counts = Counter(sample_ids)

    # Get the count of points in the polygon
    with arcpy.da.UpdateCursor(out_fc, ['Linked_lagoslakeid',
                                        'Total_points_in_lake_poly']) as cursor:
        for lagos_id, join_count in cursor:
            join_count = lagos_lake_counts[lagos_id]
            cursor.updateRow((lagos_id, join_count))

    # Mark any samples linked to more than one lake so that the analyst can select
    # the correct lake in the manual process
    with arcpy.da.UpdateCursor(out_fc, [lake_id_field, 'Duplicate_Candidate']) as cursor:
        for sample_id, duplicate_flag in cursor:
            duplicate_count = linked_multiple_lake_counts[sample_id]
            if duplicate_count > 1:
                duplicate_flag = "Y"
            else:
                duplicate_flag = "N"
            cursor.updateRow((sample_id, duplicate_flag))

    # clean up
    DM.AddField(out_fc, 'Note', 'TEXT', field_length=140)
    DM.Delete('in_memory')
    arcpy.AddMessage('Completed.')
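# The linking logic above leans on lagosGIS.list_shared_words(). A hypothetical,
# minimal stand-in is sketched below so the name-matching behavior is concrete;
# the real helper may differ, and the stop-word list and tokenization here are
# assumptions, not the library's implementation.
def list_shared_words_sketch(name1, name2, exclude_lake_words=True):
    """Return words common to both names, optionally ignoring generic lake words."""
    stop_words = {'lake', 'pond', 'reservoir'} if exclude_lake_words else set()
    words1 = {w for w in name1.lower().split() if w not in stop_words}
    words2 = {w for w in name2.lower().split() if w not in stop_words}
    return ' '.join(sorted(words1 & words2))

# list_shared_words_sketch('Mud Lake', 'Mud Pond')  # -> 'mud'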
def process_zone(zone_fc, output, zone_name, zone_id_field, zone_name_field,
                 other_keep_fields, clip_hu8, lagosne_name):
    # dissolve on the field that zone_id is based on (the field that identifies a unique zone)
    dissolve_fields = [f for f in "{}, {}, {}".format(zone_id_field, zone_name_field,
                                                      other_keep_fields).split(', ')
                       if f != '']
    print("Dissolving...")
    dissolve1 = DM.Dissolve(zone_fc, 'dissolve1', dissolve_fields)

    # update the name field to match our standard
    DM.AlterField(dissolve1, zone_name_field, 'name')

    # original area
    DM.AddField(dissolve1, 'originalarea', 'DOUBLE')
    DM.CalculateField(dissolve1, 'originalarea', '!shape.area@hectares!', 'PYTHON')

    # clip
    print("Clipping...")
    clip = AN.Clip(dissolve1, MASTER_CLIPPING_POLY, 'clip')
    if clip_hu8 == 'Y':
        final_clip = AN.Clip(clip, HU8_OUTPUT, 'final_clip')
    else:
        final_clip = clip

    print("Selecting...")
    # calc new area, orig area pct, compactness
    DM.AddField(final_clip, 'area_ha', 'DOUBLE')
    DM.AddField(final_clip, 'originalarea_pct', 'DOUBLE')
    DM.AddField(final_clip, 'compactness', 'DOUBLE')
    DM.JoinField(final_clip, zone_id_field, dissolve1, zone_id_field, 'originalarea_pct')

    uCursor_fields = ['area_ha', 'originalarea_pct', 'originalarea', 'compactness',
                      'SHAPE@AREA', 'SHAPE@LENGTH']
    with arcpy.da.UpdateCursor(final_clip, uCursor_fields) as uCursor:
        for row in uCursor:
            area, orig_area_pct, orig_area, comp, shape_area, shape_length = row
            area = shape_area / 10000  # convert from m2 to hectares
            orig_area_pct = round(100 * area / orig_area, 2)
            comp = 4 * 3.14159 * shape_area / (shape_length ** 2)  # Polsby-Popper compactness
            row = (area, orig_area_pct, orig_area, comp, shape_area, shape_length)
            uCursor.updateRow(row)

    # Filter out zones with <5% of their original area remaining AND a compactness
    # measure of <.2 (ranges from 0-1), unless they are bigger than 500 sq. km
    # (saves Chippewa County and a WWF). Save eliminated polygons to the temp
    # database as a separate layer for inspection.
    # Different processing for HU4 and HU8, so that they match the extent of HU8
    # more closely but still throw out tiny slivers.
    # County zones are only eliminated if a tiny, tiny, tiny sliver (so: none should
    # be eliminated).
    if zone_name not in ('hu4', 'hu12', 'county'):
        selected = AN.Select(final_clip, 'selected',
                             "originalarea_pct >= 5 OR compactness >= .2 OR area_ha > 50000")
        not_selected = AN.Select(final_clip, '{}_not_selected'.format(output),
                                 "originalarea_pct < 5 AND compactness < .2 AND area_ha < 50000")
    else:
        selected = final_clip

    # Eliminate small slivers, re-calc area fields, add perimeter and multipart flag.
    # This leaves the occasional errant sliver, but some areas over 25 hectares are
    # more valid, so this is CONSERVATIVE.
    print("Trimming...")
    trimmed = DM.EliminatePolygonPart(selected, 'trimmed', 'AREA', '25 Hectares',
                                      part_option='ANY')

    # gather up a few calculations into one cursor because this is taking too long
    # over the HU12 layer
    DM.AddField(trimmed, 'perimeter_m', 'DOUBLE')
    DM.AddField(trimmed, 'multipart', 'TEXT', field_length=1)
    uCursor_fields = ['area_ha', 'originalarea_pct', 'originalarea', 'perimeter_m',
                      'multipart', 'SHAPE@']
    with arcpy.da.UpdateCursor(trimmed, uCursor_fields) as uCursor:
        for row in uCursor:
            area, orig_area_pct, orig_area, perim, multipart, shape = row
            area = shape.area / 10000  # convert to hectares from m2
            orig_area_pct = round(100 * area / orig_area, 2)
            perim = shape.length

            # multipart flag calc
            if shape.isMultipart:
                multipart = 'Y'
            else:
                multipart = 'N'
            row = (area, orig_area_pct, orig_area, perim, multipart, shape)
            uCursor.updateRow(row)

    # delete intermediate fields
    DM.DeleteField(trimmed, 'compactness')
    DM.DeleteField(trimmed, 'originalarea')

    print("Zone IDs....")
    # link to LAGOS-NE zone IDs
    DM.AddField(trimmed, 'zoneid', 'TEXT', field_length=40)
    trimmed_lyr = DM.MakeFeatureLayer(trimmed, 'trimmed_lyr')
    if lagosne_name:
        # join to the old master GDB path on the same master field and copy in the ids
        old_fc = os.path.join(LAGOSNE_GDB, lagosne_name)
        old_fc_lyr = DM.MakeFeatureLayer(old_fc, 'old_fc_lyr')
        if lagosne_name == 'STATE' or lagosne_name == 'COUNTY':
            DM.AddJoin(trimmed_lyr, zone_id_field, old_fc_lyr, 'FIPS')
        else:
            # usually works because same source data
            DM.AddJoin(trimmed_lyr, zone_id_field, old_fc_lyr, zone_id_field)

        # copy
        DM.CalculateField(trimmed_lyr, 'zoneid',
                          '!{}.ZoneID!.lower()'.format(lagosne_name), 'PYTHON')
        DM.RemoveJoin(trimmed_lyr)

    # generate new zone ids
    old_ids = [row[0] for row in arcpy.da.SearchCursor(trimmed, 'zoneid')]
    with arcpy.da.UpdateCursor(trimmed, 'zoneid') as cursor:
        counter = 1
        for row in cursor:
            if not row[0]:  # if no existing ID borrowed from LAGOS-NE, assign a new one
                new_id = '{name}_{num}'.format(name=zone_name, num=counter)

                # ensures new ids don't re-use old numbers but fills in all
                # positive numbers eventually
                while new_id in old_ids:
                    counter += 1
                    new_id = '{name}_{num}'.format(name=zone_name, num=counter)
                row[0] = new_id
                cursor.updateRow(row)
                counter += 1

    print("Edge flags...")
    # add flag fields
    DM.AddField(trimmed, 'onlandborder', 'TEXT', field_length=2)
    DM.AddField(trimmed, 'oncoast', 'TEXT', field_length=2)

    # identify border zones
    border_lyr = DM.MakeFeatureLayer(LAND_BORDER, 'border_lyr')
    DM.SelectLayerByLocation(trimmed_lyr, 'INTERSECT', border_lyr)
    DM.CalculateField(trimmed_lyr, 'onlandborder', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(trimmed_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(trimmed_lyr, 'onlandborder', "'N'", 'PYTHON')

    # identify coastal zones
    coastal_lyr = DM.MakeFeatureLayer(COASTLINE, 'coastal_lyr')
    DM.SelectLayerByLocation(trimmed_lyr, 'INTERSECT', coastal_lyr)
    DM.CalculateField(trimmed_lyr, 'oncoast', "'Y'", 'PYTHON')
    DM.SelectLayerByAttribute(trimmed_lyr, 'SWITCH_SELECTION')
    DM.CalculateField(trimmed_lyr, 'oncoast', "'N'", 'PYTHON')

    print("State assignment...")
    # assign a state to each zone: use the state containing the zone's center when
    # possible, otherwise fall back to any intersecting state
    DM.AddField(trimmed, 'state', 'TEXT', field_length=2)
    state_center = arcpy.SpatialJoin_analysis(trimmed, STATE_FC, 'state_center',
                                              join_type='KEEP_COMMON',
                                              match_option='HAVE_THEIR_CENTER_IN')
    state_intersect = arcpy.SpatialJoin_analysis(trimmed, STATE_FC, 'state_intersect',
                                                 match_option='INTERSECT')
    state_center_dict = {row[0]: row[1] for row in
                         arcpy.da.SearchCursor(state_center, ['ZoneID', 'STUSPS'])}
    state_intersect_dict = {row[0]: row[1] for row in
                            arcpy.da.SearchCursor(state_intersect, ['ZoneID', 'STUSPS'])}
    with arcpy.da.UpdateCursor(trimmed, ['ZoneID', 'state']) as cursor:
        for updateRow in cursor:
            keyValue = updateRow[0]
            if keyValue in state_center_dict:
                updateRow[1] = state_center_dict[keyValue]
            else:
                updateRow[1] = state_intersect_dict[keyValue]
            cursor.updateRow(updateRow)

    # glaciation status?
    # TODO as version 0.6

    # preface the field names with the zone name
    DM.DeleteField(trimmed, 'ORIG_FID')
    fields = [f.name for f in arcpy.ListFields(trimmed, '*')
              if f.type not in ('OID', 'Geometry') and not f.name.startswith('Shape_')]
    for f in fields:
        new_fname = '{zn}_{orig}'.format(zn=zone_name, orig=f).lower()
        try:
            # skip required fields rather than debug the required-field message;
            # we don't want to change required fields anyway
            DM.AlterField(trimmed, f, new_fname, clear_field_alias='TRUE')
        except:
            pass

    DM.CopyFeatures(trimmed, output)

    # cleanup
    lyr_objects = [lyr_object for var_name, lyr_object in locals().items()
                   if var_name.endswith('lyr')]
    temp_fcs = arcpy.ListFeatureClasses('*')
    for l in lyr_objects + temp_fcs:
        DM.Delete(l)
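# The 'compactness' filter above is the Polsby-Popper ratio 4*pi*A/P^2, which is
# 1.0 for a circle and near 0 for slivers. A small, self-contained check of the
# < 0.2 threshold, with invented shapes for illustration:
import math

def compactness(area_m2, perimeter_m):
    return 4 * math.pi * area_m2 / perimeter_m ** 2

# a 1000 m x 1000 m square: compactness(1e6, 4000) ~ 0.785 (kept)
# a 10 km x 10 m sliver: compactness(1e5, 20020) ~ 0.003 (dropped unless the
# area or original-area-percent tests pass)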
def pmpAnalysis(aoiBasin, stormType, durList):

    ###########################################################################
    ## Create PMP Point Feature Class from points within AOI basin and add fields
    def createPMPfc():
        arcpy.AddMessage("\nCreating feature class: 'PMP_Points' in Scratch.gdb...")
        dm.MakeFeatureLayer(home + "\\Input\\Non_Storm_Data.gdb\\Vector_Grid",
                            "vgLayer")  # make a feature layer of vector grid cells
        dm.SelectLayerByLocation("vgLayer", "INTERSECT", aoiBasin)  # select the vector grid cells that intersect the aoiBasin polygon
        dm.MakeFeatureLayer(home + "\\Input\\Non_Storm_Data.gdb\\Grid_Points",
                            "gpLayer")  # make a feature layer of grid points
        dm.SelectLayerByLocation("gpLayer", "HAVE_THEIR_CENTER_IN", "vgLayer")  # select the grid points within the vector grid selection
        con.FeatureClassToFeatureClass("gpLayer", env.scratchGDB, "PMP_Points")  # save feature layer as "PMP_Points" feature class
        arcpy.AddMessage("(" + str(dm.GetCount("gpLayer")) + " grid points will be analyzed)\n")

        # Add PMP fields
        for dur in durList:
            arcpy.AddMessage("\t...adding field: PMP_" + str(dur))
            dm.AddField(env.scratchGDB + "\\PMP_Points", "PMP_" + dur, "DOUBLE")

        # Add STORM fields (this string value identifies the driving storm by SPAS ID number)
        for dur in durList:
            arcpy.AddMessage("\t...adding field: STORM_" + str(dur))
            dm.AddField(env.scratchGDB + "\\PMP_Points", "STORM_" + dur, "TEXT",
                        "", "", 16)
        return

    ###########################################################################
    ## Define getAOIarea() function:
    ## getAOIarea() calculates the area of the AOI (basin outline) input shapefile/
    ## feature class. The basin outline shapefile must be projected. The area is
    ## square miles, converted from the basin layer's projected units (feet or
    ## meters). The aoiBasin feature class should only have a single feature (the
    ## basin outline). If there are multiple features, the area will be stored
    ## for the final feature only.
    def getAOIarea():
        sr = arcpy.Describe(aoiBasin).SpatialReference  # determine the aoiBasin spatial reference system
        srname = sr.name
        srtype = sr.type
        srunitname = sr.linearUnitName  # units
        arcpy.AddMessage("\nAOI basin spatial reference: " + srname +
                         "\nUnit type: " + srunitname +
                         "\nSpatial reference type: " + srtype)

        aoiArea = 0.0
        rows = arcpy.SearchCursor(aoiBasin)
        for row in rows:
            feat = row.getValue("Shape")
            aoiArea += feat.area

        if srtype == 'Geographic':
            # Must have a surface projection. If one doesn't exist it projects a
            # temporary file and uses that.
            arcpy.AddMessage("\n***The basin shapefile's spatial reference 'Geographic' "
                             "is not supported. Projecting temporary shapefile for AOI.***")
            arcpy.Project_management(aoiBasin, env.scratchGDB + "\\TempBasin", 102039)
            # 102039 = USA_Contiguous_Albers_Equal_Area_Conic_USGS_version
            TempBasin = env.scratchGDB + "\\TempBasin"  # path to temporary basin created in the scratch geodatabase
            sr = arcpy.Describe(TempBasin).SpatialReference
            aoiArea = 0.0
            rows = arcpy.SearchCursor(TempBasin)  # sum the area in square meters
            for row in rows:
                feat = row.getValue("Shape")
                aoiArea += feat.area
            aoiArea = aoiArea * 0.000000386102  # converts square meters to square miles
        elif srtype == 'Projected':
            # If a projection exists, re-project to a temporary file and use that
            # for data consistency.
            arcpy.AddMessage("\n***The basin shapefile's spatial reference will be "
                             "reprojected to USA_Contiguous_Albers_Equal_Area_Conic_USGS_version "
                             "for data consistency. Projecting temporary shapefile for AOI.***")
            arcpy.Project_management(aoiBasin, env.scratchGDB + "\\TempBasin", 102039)
            TempBasin = env.scratchGDB + "\\TempBasin"
            sr = arcpy.Describe(TempBasin).SpatialReference
            aoiArea = 0.0
            rows = arcpy.SearchCursor(TempBasin)
            for row in rows:
                feat = row.getValue("Shape")
                aoiArea += feat.area
            aoiArea = aoiArea * 0.000000386102

        aoiArea = round(aoiArea, 3)
        arcpy.AddMessage("\nArea of interest: " + str(aoiArea) + " square miles.")

        if arcpy.GetParameter(5) == False:
            aoiArea = arcpy.GetParameter(6)  # enable a constant area size
        aoiArea = round(aoiArea, 1)
        arcpy.AddMessage("\n***Area used for PMP analysis: " + str(aoiArea) + " sqmi***")
        return aoiArea

    ###########################################################################
    ## Define dadLookup() function:
    ## The dadLookup() function determines the DAD value for the current storm
    ## and duration according to the basin area size. The DAD depth is interpolated
    ## linearly between the two nearest areal values within the DAD table.
    def dadLookup(stormLayer, duration, area):
        # dadLookup() accepts the current storm layer name (string), the current
        # duration (string), and the AOI area size (float)
        #arcpy.AddMessage("\t\tfunction dadLookup() called.")
        durField = "H_" + duration  # name of the duration field (e.g., "H_06" for 6-hour)
        dadTable = dadGDB + "\\" + stormLayer
        rows = arcpy.SearchCursor(dadTable)

        try:
            row = rows.next()  # set DAD area x1 to the value in the first row of the DAD table
            x1 = row.AREASQMI
            y1 = row.getValue(durField)
            xFlag = "FALSE"  # xFlag remains false for basins larger than the largest DAD area
        except RuntimeError:  # return if the duration does not exist in the DAD table
            return

        row = rows.next()
        i = 0
        while row:  # iterate through the DAD table, assigning the bounding values
            i += 1  # directly above and below the basin area size
            if row.AREASQMI < area:
                x1 = row.AREASQMI
                y1 = row.getValue(durField)
            else:
                xFlag = "TRUE"  # xFlag switched to "TRUE": the area is within the DAD range
                x2 = row.AREASQMI
                y2 = row.getValue(durField)
                break
            row = rows.next()
        del row, rows, i

        if xFlag == "FALSE":
            # x2 equal to the basin area means the largest DAD area is smaller than
            # the basin, so the resulting DAD value must be extrapolated
            x2 = area
            arcpy.AddMessage("\t\tThe basin area size: " + str(area) +
                             " sqmi is greater than the largest DAD area: " + str(x1) +
                             " sqmi.\n\t\tDAD value is estimated by extrapolation.")
            y = x1 / x2 * y1  # estimate the DAD depth by extrapolating to the basin area size
            return y  # the extrapolated DAD depth (in inches) is returned

        # arcpy.AddMessage("\nArea = " + str(area) + "\nx1 = " + str(x1) +
        #                  "\nx2 = " + str(x2) + "\ny1 = " + str(y1) + "\ny2 = " + str(y2))

        x = area  # the basin area is within the DAD table range: interpolate the DAD
        deltax = x2 - x1  # value (y) at area (x) based on the next lower (x1)
        deltay = y2 - y1  # and next higher (x2) areas
        diffx = x - x1
        y = y1 + diffx * deltay / deltax

        if x < x1:
            arcpy.AddMessage("\t\tThe basin area size: " + str(area) +
                             " sqmi is less than the smallest DAD table area: " + str(x1) +
                             " sqmi.\n\t\tDAD value is estimated by extrapolation.")
        return y  # the interpolated DAD depth (in inches) is returned

    ###########################################################################
    ## Define updatePMP() function:
    ## This function updates the 'PMP_XX' and 'STORM_XX' fields of the PMP_Points
    ## feature class with the largest value from all analyzed storms stored in the
    ## pmpValues list.
    def updatePMP(pmpValues, stormID, duration):
        # accepts three arguments: pmpValues - largest adjusted rainfall for the
        # current duration (float list); stormID - driver storm ID for each PMP
        # value (text list); and duration (string)
        pmpfield = "PMP_" + duration
        stormfield = "STORM_" + duration
        gridRows = arcpy.UpdateCursor(env.scratchGDB + "\\PMP_Points")  # iterate through the PMP_Points rows
        i = 0
        for row in gridRows:
            row.setValue(pmpfield, pmpValues[i])  # set the PMP field value equal to the max adjusted rainfall value (if larger than the existing value)
            row.setValue(stormfield, stormID[i])  # set the storm ID field to indicate the driving storm event
            gridRows.updateRow(row)
            i += 1
        del row, gridRows, pmpfield, stormfield
        arcpy.AddMessage("\n\t" + duration + "-hour PMP values update complete. \n")
        return

    ###########################################################################
    ## The outputPMP() function produces raster GRID files for each of the PMP durations.
    ## Also, a space-delimited PMP_Distribution.txt file is created in the
    ## 'Text_Output' folder.
    def outputPMP(type, area, outPath):
        desc = arcpy.Describe(basin)
        basinName = desc.baseName
        pmpPoints = env.scratchGDB + "\\PMP_Points"  # location of the 'PMP_Points' feature class which will provide data for output
        outType = type[:1]
        outArea = str(int(round(area, 0))) + "sqmi"
        outFC = outType + "_" + outArea  # I don't think I need this.....

        arcpy.AddMessage("\nCopying PMP_Points feature class to " + outFC + "...")  # outFC might be replaced with outPath...
        dm.Merge(pmpPoints, outPath)  # merge the scratch feature layer(s) of vector grid cells into the outputs

        arcpy.AddMessage("\nCreating Basin Summary Table...")
        tableName = type + "_PMP_Basin_Average" + "_" + outArea
        tablePath = env.scratchGDB + "\\" + tableName
        dm.CreateTable(env.scratchGDB, tableName)  # create a blank table
        cursor = arcpy.da.InsertCursor(tablePath, "*")  # create an insert cursor and add a blank row to the table
        cursor.insertRow([0])
        del cursor

        dm.AddField(tablePath, "STORM_TYPE", "TEXT", "", "", 10, "Storm Type")  # create the "Storm Type" field
        dm.CalculateField(tablePath, "STORM_TYPE", "'" + type + "'", "PYTHON_9.3")  # populate the storm type field

        i = 0
        for field in arcpy.ListFields(pmpPoints, "PMP_*"):  # add fields for each PMP duration and calculate the basin average
            fieldName = field.name
            fieldAve = basinAve(basin, fieldName)  # calls the basinAve() function - returns the average (weighted or not)
            dm.AddField(tablePath, fieldName, "DOUBLE", "", 2)  # add the duration field
            dm.CalculateField(tablePath, fieldName, fieldAve, "PYTHON_9.3")  # assign the basin average
            i += 1
        arcpy.AddMessage("\nSummary table complete.")
        basAveTables.append(tablePath)
        return

    ###########################################################################
    ## The basinAve() function returns the basin average PMP value for a given
    ## duration field. If the option for a weighted average is checked in the tool
    ## parameter, the script weights the grid point values based on the proportion
    ## of area inside the basin.
    def basinAve(aoiBasin, pmpField):
        pmpPoints = env.scratchGDB + "\\PMP_Points"  # path of the 'PMP_Points' scratch feature class
        if weightedAve:
            arcpy.AddMessage("\tCalculating basin average for " + pmpField + " (weighted)...")
            vectorGridClip = env.scratchGDB + "\\VectorGridClip"  # path of the clipped vector grid scratch feature class
            sumstats = env.scratchGDB + "\\SummaryStats"

            dm.MakeFeatureLayer(home + "\\Input\\Non_Storm_Data.gdb\\Vector_Grid",
                                "vgLayer")  # make a feature layer of vector grid cells
            dm.SelectLayerByLocation("vgLayer", "INTERSECT", aoiBasin)  # select the vector grid cells that intersect the aoiBasin polygon
            an.Clip("vgLayer", aoiBasin, vectorGridClip)  # clip the AOI vector grid to the basin
            dm.AddField(pmpPoints, "WEIGHT", "DOUBLE")  # add the 'WEIGHT' field to the PMP_Points scratch feature class
            dm.MakeFeatureLayer(vectorGridClip, "vgClipLayer")  # make a feature layer of basin-clipped vector grid cells
            dm.MakeFeatureLayer(pmpPoints, "pmpPointsLayer")  # make a feature layer of the PMP_Points feature class
            dm.AddJoin("pmpPointsLayer", "ID", "vgClipLayer", "ID")  # join the PMP_Points and vectorGridClip tables
            dm.CalculateField("pmpPointsLayer", "WEIGHT",
                              "!vectorGridClip.Shape_Area!", "PYTHON_9.3")  # calculate the basin area proportion to use as the weight for each grid cell
            dm.RemoveJoin("pmpPointsLayer", "vectorGridClip")

            an.Statistics(pmpPoints, sumstats, [["WEIGHT", "SUM"]], "")
            stats = arcpy.SearchCursor(sumstats)
            pmpWgtAve = pmpField + "_WgtAve"
            for row in stats:
                calc = row.getValue("SUM_WEIGHT")
                express = "(!WEIGHT!/{})* !{}!".format(calc, pmpField)
                i = 0
                for field in arcpy.ListFields(pmpPoints, pmpField):
                    dm.AddField(pmpPoints, pmpWgtAve, "DOUBLE", 2)
                    dm.CalculateField(pmpPoints, pmpWgtAve, express, "PYTHON_9.3")
                    i += 1
            del stats, row

            an.Statistics(pmpPoints, sumstats, [[pmpWgtAve, "SUM"]], "")
            sumwgtave = "SUM_" + pmpWgtAve
            with arcpy.da.SearchCursor(sumstats, sumwgtave) as stats:
                for row in stats:
                    wgtAve = row[0]
                    return round(wgtAve, 2)

##            na = arcpy.da.TableToNumPyArray(pmpPoints, (pmpField, 'WEIGHT'))  # assign pmpPoints values and weights to a NumPy array (na)
##            wgtAve = numpy.average(na[pmpField], weights=na['WEIGHT'])        # calculate the weighted average with numpy.average
##            del na
##            return round(wgtAve, 2)
        else:
            arcpy.AddMessage("\tCalculating basin average for " + pmpField + " (not weighted)...")
            sumstats = env.scratchGDB + "\\SummaryStats"
            an.Statistics(pmpPoints, sumstats, [[pmpField, "MEAN"]], "")
            mean = "MEAN_" + pmpField
            with arcpy.da.SearchCursor(sumstats, mean) as stats:
                for row in stats:
                    fieldAve = row[0]
                    return round(fieldAve, 2)

##            na = arcpy.da.TableToNumPyArray(pmpPoints, pmpField)  # assign pmpPoints values to a NumPy array (na)
##            fieldAve = numpy.average(na[pmpField])                # calculate the arithmetic mean
##            del na
##            return round(fieldAve, 2)

    ###########################################################################
    ## This portion of the code iterates through each storm feature class in the
    ## 'Storm_Adj_Factors' geodatabase (evaluating the feature class only within
    ## the Local, Tropical, or General feature dataset). For each duration, at each
    ## grid point within the AOI basin, the transpositionality is confirmed. Then
    ## the DAD precip depth is retrieved and applied to the total adjustment factor
    ## to yield the total adjusted rainfall. This value is then sent to the
    ## updatePMP() function to update the 'PMP_Points' feature class.
    ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##

    desc = arcpy.Describe(basin)  # ensure the AOI input shape is a polygon; if not, exit
    basinShape = desc.shapeType
    if desc.shapeType == "Polygon":
        arcpy.AddMessage("\nBasin shape type: " + desc.shapeType)
    else:
        arcpy.AddMessage("\nBasin shape type: " + desc.shapeType)
        arcpy.AddMessage("\nError: Input shapefile must be a polygon!\n")
        sys.exit()

    createPMPfc()  # call createPMPfc() to create the PMP_Points feature class

    env.workspace = adjFactGDB  # set the workspace environment to the 'Storm_Adj_Factors' file geodatabase

    aoiSQMI = round(getAOIarea(), 2)  # call getAOIarea() to assign the area of the AOI shapefile to 'aoiSQMI'

    for dur in durList:
        stormList = arcpy.ListFeatureClasses("", "Point", stormType)  # list all the total adjustment factor feature classes within the storm type feature dataset

        arcpy.AddMessage("\n*************************************************************"
                         "\nEvaluating " + dur + "-hour duration...")

        pmpList = []
        driverList = []
        gridRows = arcpy.SearchCursor(env.scratchGDB + "\\PMP_Points")
        try:
            for row in gridRows:
                pmpList.append(0.0)  # pmpList holds an empty float for each grid point to store the final PMP value
                driverList.append("STORM")  # driverList holds an empty string for each grid point to store the final driver storm ID
            del row, gridRows
        except UnboundLocalError:
            arcpy.AddMessage("\n***Error: No data present within basin/AOI area.***\n")
            sys.exit()

        for storm in stormList:
            arcpy.AddMessage("\n\tEvaluating storm: " + storm + "...")
            dm.MakeFeatureLayer(storm, "stormLayer")  # create a feature layer for the current storm
            dm.SelectLayerByLocation("stormLayer", "HAVE_THEIR_CENTER_IN", "vgLayer")  # examine only the grid points that lie within the AOI
            gridRows = arcpy.SearchCursor("stormLayer")
            pmpField = "PMP_" + dur
            i = 0
            try:
                dadPrecip = round(dadLookup(storm, dur, aoiSQMI), 3)
                arcpy.AddMessage("\t\t" + dur + "-hour DAD value: " + str(dadPrecip) + chr(34))
            except TypeError:  # if the duration does not exist in the DAD table, move to the next storm
                arcpy.AddMessage("\t***Duration '" + str(dur) + "-hour' is not present for " +
                                 str(storm) + ".***\n")
                continue
            arcpy.AddMessage("\t\tComparing " + storm +
                             " adjusted rainfall values against current driver values...\n")
            for row in gridRows:
                if row.TRANS == 1:  # only continue if the grid point is transpositionable ('1' is transpositionable, '0' is not)
                    try:  # get the total adjustment factor if the duration exists
                        adjRain = round(dadPrecip * row.TAF, 1)
                        if adjRain > pmpList[i]:
                            pmpList[i] = adjRain
                            driverList[i] = storm
                    except RuntimeError:
                        arcpy.AddMessage("\t\t *Warning* Total Adjusted Rainfall value "
                                         "failed to set for row " + str(row.CNT))
                        break
                    del adjRain
                i += 1
            del row
        del storm, stormList, gridRows, dadPrecip

        updatePMP(pmpList, driverList, dur)  # call updatePMP() to update the "PMP_Points" feature class
    del dur, pmpList

    arcpy.AddMessage("\n'PMP_Points' Feature Class 'PMP_XX' fields update complete "
                     "for all '" + stormType + "' storms.")

    outputPMP(stormType, aoiSQMI, outPath)  # call the outputPMP() function
    del aoiSQMI
    return
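# A worked, arcpy-free sketch of the interpolation/extrapolation that dadLookup()
# performs, so the math can be checked in isolation. The sample DAD rows are invented.
def dad_interpolate(area, table):
    """table: list of (area_sqmi, depth_inches) pairs sorted by ascending area."""
    x1, y1 = table[0]
    for x2, y2 in table[1:]:
        if x2 < area:
            x1, y1 = x2, y2
        else:
            # linear interpolation between the bounding areas, as in dadLookup()
            return y1 + (area - x1) * (y2 - y1) / (x2 - x1)
    # basin larger than the largest DAD area: extrapolate the same way dadLookup() does
    return x1 / area * y1

# dad_interpolate(150.0, [(10, 20.0), (100, 15.0), (200, 12.0)])  # -> 13.5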
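# basinAve() (nested in pmpAnalysis() above) also carries a commented-out NumPy
# alternative for the weighted average; the self-contained sketch below shows the
# same calculation. The values are invented for illustration.
import numpy

depths = numpy.array([2.1, 2.4, 2.2])    # PMP depths at three grid points
weights = numpy.array([1.0, 0.5, 0.25])  # proportion of each cell inside the basin
print(round(numpy.average(depths, weights=weights), 2))  # -> 2.2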
def snap_points_to_mask_raster(in_file, mask, out_file, distance, workspace):
    if distance is None or len(distance) == 0:
        distance = "100 METERS"

    if arcpy.env.outputCoordinateSystem is None:
        arcpy.env.outputCoordinateSystem = mask
    print(arcpy.env.outputCoordinateSystem.name)

    if workspace:
        arcpy.env.workspace = workspace
    if arcpy.env.workspace is None or len(arcpy.env.workspace) == 0:
        arcpy.env.workspace = os.getcwd()
    arcpy.AddMessage("workspace is %s" % arcpy.env.workspace)

    try:
        suffix = None
        wk = arcpy.env.workspace
        if '.gdb' not in wk:
            suffix = '.shp'
        poly_file = arcpy.CreateScratchName(None, suffix, 'POLYGON')
        arcpy.RasterToPolygon_conversion(mask, poly_file, 'NO_SIMPLIFY')
    except:
        raise
    arcpy.AddMessage("poly_file is %s" % poly_file)

    # handle layers and datasets
    desc = arcpy.Describe(in_file)
    in_file = desc.catalogPath

    # add .shp extension if needed - clunky, but otherwise the system fails below
    re_gdb = re.compile(r'\.gdb$')
    re_shp = re.compile(r'\.shp$')
    path = os.path.dirname(out_file)
    if len(path) == 0:
        path = arcpy.env.workspace
    if not re_gdb.search(path) and not re_shp.search(out_file):
        out_file += '.shp'

    arcpy.AddMessage("Input point file is %s" % in_file)
    arcpy.AddMessage("Output point file is %s" % out_file)

    arcmgt.CopyFeatures(in_file, out_file)

    try:
        snap_layer_name = 'get_layer_for_snapping'
        arcmgt.MakeFeatureLayer(out_file, snap_layer_name)
        arcmgt.SelectLayerByLocation(snap_layer_name, 'intersect', poly_file,
                                     '#', 'NEW_SELECTION')
        arcmgt.SelectLayerByAttribute(snap_layer_name, 'SWITCH_SELECTION')
        if int(arcmgt.GetCount(snap_layer_name).getOutput(0)) > 0:
            arcpy.Snap_edit(snap_layer_name, [[poly_file, "EDGE", distance]])
        else:
            arcpy.AddMessage('No features selected, no snapping applied')
    except Exception as e:
        print(arcpy.GetMessages())
        raise e

    arcmgt.Delete(snap_layer_name)
    arcmgt.Delete(poly_file)

    print(arcpy.GetMessages())
    print("Completed")
    return
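# Hypothetical usage sketch for snap_points_to_mask_raster(). The module alias
# (arcmgt = arcpy.management) and all paths are assumptions, not from the original.
import os
import re
import arcpy
from arcpy import management as arcmgt

snap_points_to_mask_raster(r'C:\temp\sites.shp',     # points to snap
                           r'C:\temp\mask_raster',   # raster defining valid cells
                           r'C:\temp\sites_snapped', # output point file
                           '250 METERS',             # snap tolerance
                           r'C:\temp')               # workspace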
    def doFishnet(self):
        #### Initial Data Assessment ####
        printOHSSection(84428, prependNewLine=True)
        printOHSSubject(84431, addNewLine=False)

        #### Find Unique Locations ####
        msg = ARCPY.GetIDMessage(84441)
        ARCPY.SetProgressor("default", msg)
        initCount = UTILS.getCount(self.ssdo.inputFC)
        self.checkIncidents(initCount)
        collectedPointFC = UTILS.returnScratchName("Collect_InitTempFC")
        collInfo = EVENTS.collectEvents(self.ssdo, collectedPointFC)
        self.cleanUpList.append(collectedPointFC)
        collSSDO = SSDO.SSDataObject(collectedPointFC,
                                     explicitSpatialRef=self.ssdo.spatialRef,
                                     useChordal=True)
        collSSDO.obtainDataGA(collSSDO.oidName)

        #################################
        if self.boundaryFC:
            #### Assure Boundary FC Has Area and Obtain Chars ####
            self.checkBoundary()

        #### Location Outliers ####
        lo = UTILS.LocationInfo(collSSDO, concept="EUCLIDEAN",
                                silentThreshold=True, stdDeviations=3)
        printOHSLocationalOutliers(lo, aggType=self.aggType)

        #### Agg Header ####
        printOHSSection(84444)
        if self.boundaryFC:
            extent = self.boundExtent
            forMercExtent = self.boundExtent
            countMSGNumber = 84453
        else:
            countMSGNumber = 84452
            extent = None
            forMercExtent = collSSDO.extent

        if collSSDO.useChordal:
            extentFC_GCS = UTILS.returnScratchName("TempGCS_Extent")
            extentFC_Merc = UTILS.returnScratchName("TempMercator_Extent")
            points = NUM.array([[forMercExtent.XMin, forMercExtent.YMax],
                                [forMercExtent.XMax, forMercExtent.YMin]])
            UTILS.createPointFC(extentFC_GCS, points, spatialRef=collSSDO.spatialRef)
            DM.Project(extentFC_GCS, extentFC_Merc, mercatorProjection)
            d = ARCPY.Describe(extentFC_Merc)
            extent = d.extent
            fishOutputCoords = mercatorProjection
        else:
            fishOutputCoords = self.ssdo.spatialRef

        #### Fish Subject ####
        printOHSSubject(84449, addNewLine=False)
        dist = scaleDecision(lo.nonZeroAvgDist, lo.nonZeroMedDist)
        area = 0.0

        #### Construct Fishnet ####
        fish = UTILS.FishnetInfo(collSSDO, area, extent, explicitCellSize=dist)
        dist = fish.quadLength
        snap = self.ssdo.distanceInfo.linearUnitString(dist)

        #### Cell Size Answer ####
        snapStr = self.ssdo.distanceInfo.printDistance(dist)
        msg = ARCPY.GetIDMessage(84450).format(snapStr)
        printOHSAnswer(msg)
        self.fish = fish

        #### Fishnet Count Subject ####
        printOHSSubject(84451, addNewLine=False)

        #### Create Temp Fishnet Grid ####
        gridFC = UTILS.returnScratchName("Fishnet_TempFC")
        self.cleanUpList.append(gridFC)

        #### Apply Output Coords to Create Fishnet ####
        oldSpatRef = ARCPY.env.outputCoordinateSystem
        ARCPY.env.outputCoordinateSystem = fishOutputCoords

        #### Fish No Extent ####
        oldExtent = ARCPY.env.extent
        ARCPY.env.extent = ""

        #### Apply Max XY Tolerance ####
        fishWithXY = UTILS.funWithXYTolerance(DM.CreateFishnet,
                                              self.ssdo.distanceInfo)

        #### Execute Fishnet ####
        fishWithXY(gridFC, self.fish.origin, self.fish.rotate,
                   self.fish.quadLength, self.fish.quadLength,
                   self.fish.numRows, self.fish.numCols, self.fish.corner,
                   "NO_LABELS", self.fish.extent, "POLYGON")

        #### Project Back to GCS if Use Chordal ####
        if collSSDO.useChordal:
            gridFC_ProjBack = UTILS.returnScratchName("TempFC_Proj")
            DM.Project(gridFC, gridFC_ProjBack, collSSDO.spatialRef)
            UTILS.passiveDelete(gridFC)
            gridFC = gridFC_ProjBack

        #### Set Env Output Coords Back ####
        ARCPY.env.outputCoordinateSystem = oldSpatRef

        #### Create Empty Field Mappings to Ignore Atts ####
        fieldMap = ARCPY.FieldMappings()
        fieldMap.addTable(self.ssdo.inputFC)
        fieldMap.removeAll()

        #### Fishnet Count Answer ####
        printOHSAnswer(ARCPY.GetIDMessage(countMSGNumber))

        #### Create Weighted Fishnet Grid ####
        tempFC = UTILS.returnScratchName("Optimized_TempFC")
        self.cleanUpList.append(tempFC)
        joinWithXY = UTILS.funWithXYTolerance(ANA.SpatialJoin,
                                              self.ssdo.distanceInfo)
        joinWithXY(gridFC, self.ssdo.inputFC, tempFC,
                   "JOIN_ONE_TO_ONE", "KEEP_ALL", "EMPTY")

        #### Clean Up Temp FCs ####
        UTILS.passiveDelete(gridFC)

        #### Remove Locations Outside Boundary FC ####
        featureLayer = "ClippedPointFC"
        DM.MakeFeatureLayer(tempFC, featureLayer)
        if self.boundaryFC:
            msg = ARCPY.GetIDMessage(84454)
            ARCPY.SetProgressor("default", msg)
            DM.SelectLayerByLocation(featureLayer, "INTERSECT",
                                     self.boundaryFC, "#", "NEW_SELECTION")
            DM.SelectLayerByLocation(featureLayer, "INTERSECT",
                                     "#", "#", "SWITCH_SELECTION")
            DM.DeleteFeatures(featureLayer)
        else:
            if additionalZeroDistScale == "ALL":
                msg = ARCPY.GetIDMessage(84455)
                ARCPY.SetProgressor("default", msg)
                DM.SelectLayerByAttribute(featureLayer, "NEW_SELECTION",
                                          '"Join_Count" = 0')
                DM.DeleteFeatures(featureLayer)
            else:
                distance = additionalZeroDistScale * fish.quadLength
                distanceStr = self.ssdo.distanceInfo.linearUnitString(distance,
                                                                      convert=True)
                nativeStr = self.ssdo.distanceInfo.printDistance(distance)
                msg = "Removing cells further than %s from input points...."
                ARCPY.AddMessage(msg % nativeStr)
                DM.SelectLayerByLocation(featureLayer, "INTERSECT",
                                         self.ssdo.inputFC, distanceStr,
                                         "NEW_SELECTION")
                DM.SelectLayerByLocation(featureLayer, "INTERSECT",
                                         "#", "#", "SWITCH_SELECTION")
                DM.DeleteFeatures(featureLayer)
        DM.Delete(featureLayer)

        del collSSDO
        ARCPY.env.extent = oldExtent
        self.createAnalysisSSDO(tempFC, "JOIN_COUNT")
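# A minimal standalone sketch of the DM.CreateFishnet call that fishWithXY wraps
# above, with invented coordinates; the real tool derives the origin, rotation,
# cell size, and row/column counts from the FishnetInfo object.
import arcpy
from arcpy import management as DM

DM.CreateFishnet(r'in_memory\fishnet_demo',
                 '0 0',          # origin coordinate
                 '0 10',         # y-axis coordinate (sets rotation)
                 '100', '100',   # cell width and height
                 '10', '10',     # number of rows and columns
                 '#', 'NO_LABELS', '#', 'POLYGON')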
def classify_lakes(nhd, out_feature_class, exclude_intermit_flowlines=False, debug_mode=False): if debug_mode: arcpy.env.overwriteOutput = True temp_gdb = cu.create_temp_GDB('classify_lake_connectivity') arcpy.env.workspace = temp_gdb arcpy.AddMessage('Debugging workspace located at {}'.format(temp_gdb)) else: arcpy.env.workspace = 'in_memory' if arcpy.Exists("temp_fc"): print("There is a problem here.") raise Exception # Tool temporary feature classes temp_fc = "temp_fc" csiwaterbody_10ha = "csiwaterbody_10ha" nhdflowline_filtered = "nhdflowline_filtered" dangles = "dangles" start = "start" end = "end" startdangles = "startdangles" enddangles = "enddangles" non_artificial_end = "non_artificial_end" flags_10ha_lake_junctions = "flags_10ha_lake_junctions" midvertices = "midvertices" non10vertices = "non10vertices" non10junctions = "non10junctions" all_non_flag_points = "all_non_flag_points" barriers = "barriers" trace1_junctions = "trace1_junctions" trace1_flowline = "trace1_flowline" trace2_junctions = "trace2junctions" trace2_flowline = "trace2_flowline" # Clean up workspace in case of bad exit from prior run in same session. this_tool_layers = [ "dangles_lyr", "nhdflowline_lyr", "junction_lyr", "midvertices_lyr", "all_non_flag_points_lyr", "non10vertices_lyr", "out_fc_lyr", "trace1", "trace2" ] this_tool_temp = [ temp_fc, csiwaterbody_10ha, nhdflowline_filtered, dangles, start, end, startdangles, enddangles, non_artificial_end, flags_10ha_lake_junctions, midvertices, non10vertices, non10junctions, all_non_flag_points, barriers, trace1_junctions, trace1_flowline, trace2_junctions, trace2_flowline ] for item in this_tool_layers + this_tool_temp: try: DM.Delete(item) except: pass # Local variables: nhdflowline = os.path.join(nhd, "Hydrography", "NHDFLowline") nhdjunction = os.path.join(nhd, "Hydrography", "HYDRO_NET_Junctions") nhdwaterbody = os.path.join(nhd, "Hydrography", "NHDWaterbody") network = os.path.join(nhd, "Hydrography", "HYDRO_NET") # Get lakes, ponds and reservoirs over a hectare. #csi_population_filter = '''"AreaSqKm" >=0.01 AND\ #"FCode" IN (39000,39004,39009,39010,39011,39012,43600,43613,43615,43617,43618,43619,43621)''' all_lakes_reservoirs_filter = '''"FType" IN (390, 436)''' # Can't see why we shouldn't just attribute all lakes and reservoirs # arcpy.Select_analysis(nhdwaterbody, "csiwaterbody", lake_population_filter) arcpy.AddMessage("Initializing output.") if exclude_intermit_flowlines: DM.CopyFeatures(out_feature_class, temp_fc) DM.Delete(out_feature_class) else: arcpy.Select_analysis(nhdwaterbody, temp_fc, all_lakes_reservoirs_filter) # Get lakes, ponds and reservoirs over 10 hectares. lakes_10ha_filter = '''"AreaSqKm" >= 0.1 AND "FType" IN (390, 436)''' arcpy.Select_analysis(nhdwaterbody, csiwaterbody_10ha, lakes_10ha_filter) # Exclude intermittent flowlines, if requested if exclude_intermit_flowlines: flowline_where_clause = '''"FCode" NOT IN (46003,46007)''' nhdflowline = arcpy.Select_analysis(nhdflowline, nhdflowline_filtered, flowline_where_clause) # Make dangle points at end of nhdflowline DM.FeatureVerticesToPoints(nhdflowline, dangles, "DANGLE") DM.MakeFeatureLayer(dangles, "dangles_lyr") # Isolate start dangles from end dangles. 
    DM.FeatureVerticesToPoints(nhdflowline, start, "START")
    DM.FeatureVerticesToPoints(nhdflowline, end, "END")
    DM.SelectLayerByLocation("dangles_lyr", "ARE_IDENTICAL_TO", start)
    DM.CopyFeatures("dangles_lyr", startdangles)
    DM.SelectLayerByLocation("dangles_lyr", "ARE_IDENTICAL_TO", end)
    DM.CopyFeatures("dangles_lyr", enddangles)

    # Special handling for lakes that have some intermittent inflow and some permanent.
    if exclude_intermit_flowlines:
        DM.MakeFeatureLayer(nhdflowline, "nhdflowline_lyr")
        DM.SelectLayerByAttribute("nhdflowline_lyr", "NEW_SELECTION",
                                  '''"WBArea_Permanent_Identifier" IS NULL''')
        DM.FeatureVerticesToPoints("nhdflowline_lyr", non_artificial_end, "END")
        DM.SelectLayerByAttribute("nhdflowline_lyr", "CLEAR_SELECTION")
    arcpy.AddMessage("Found source area nodes.")

    # Get junctions from lakes >= 10 hectares.
    DM.MakeFeatureLayer(nhdjunction, "junction_lyr")
    DM.SelectLayerByLocation("junction_lyr", "INTERSECT", csiwaterbody_10ha,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.CopyFeatures("junction_lyr", flags_10ha_lake_junctions)
    arcpy.AddMessage("Found lakes >= 10 ha.")

    # Make a point feature class and layer at flowline vertices to act as
    # potential flags and/or barriers.
    arcpy.AddMessage("Tracing...")
    DM.FeatureVerticesToPoints(nhdflowline, midvertices, "MID")
    DM.MakeFeatureLayer(midvertices, "midvertices_lyr")

    # Get vertices that are not coincident with 10 hectare lake junctions.
    DM.SelectLayerByLocation("midvertices_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "NEW_SELECTION")
    DM.SelectLayerByLocation("midvertices_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "SWITCH_SELECTION")
    DM.CopyFeatures("midvertices_lyr", non10vertices)

    # Get junctions that are not coincident with 10 hectare lake junctions.
    DM.SelectLayerByLocation("junction_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "NEW_SELECTION")
    DM.SelectLayerByLocation("junction_lyr", "INTERSECT",
                             flags_10ha_lake_junctions, "", "SWITCH_SELECTION")
    DM.CopyFeatures("junction_lyr", non10junctions)

    # Merge non10junctions with non10vertices (both point FCs in_memory).
    DM.Merge([non10junctions, non10vertices], all_non_flag_points)
    DM.MakeFeatureLayer(all_non_flag_points, "all_non_flag_points_lyr")

    # Test the counts; for some reason the merge doesn't always behave stably.
    mid_n = int(DM.GetCount(non10vertices).getOutput(0))
    jxn_n = int(DM.GetCount(non10junctions).getOutput(0))
    merge_n = int(DM.GetCount(all_non_flag_points).getOutput(0))
    if merge_n < mid_n + jxn_n:
        arcpy.AddWarning(
            "The total number of flags ({0}) is less than the sum of the input "
            "junctions ({1}) and input midpoints ({2})".format(merge_n, jxn_n, mid_n))

    # For tracing barriers, select the all_non_flag_points that intersect a 10 ha lake.
    DM.SelectLayerByLocation("all_non_flag_points_lyr", "INTERSECT",
                             csiwaterbody_10ha, XY_TOLERANCE, "NEW_SELECTION")
    DM.CopyFeatures("all_non_flag_points_lyr", barriers)

    # Trace1: trace downstream to the first barrier (junctions + midvertices in
    # 10 ha lakes), starting from the flags_10ha_lake_junctions flag points.
    DM.TraceGeometricNetwork(network, "trace1", flags_10ha_lake_junctions,
                             "TRACE_DOWNSTREAM", barriers)

    # Save trace1 flowlines and junctions to layers on disk.
    DM.CopyFeatures("trace1\\HYDRO_NET_Junctions", trace1_junctions)  # extra for debugging
    DM.CopyFeatures("trace1\\NHDFlowline", trace1_flowline)

    # Select vertex midpoints that intersect the trace1 flowline selection as
    # the new flags for trace2.
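    # (Because trace1 started at junctions on >= 10 ha lakes, any midpoint on a
    # trace1 flowline lies downstream of such a lake; tracing again from those
    # midpoints extends coverage past trace1's barriers, and lakes touched by
    # trace2 junctions are classed 'DrainageLk' further down.)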
    DM.MakeFeatureLayer(non10vertices, "non10vertices_lyr")
    DM.SelectLayerByLocation("non10vertices_lyr", "INTERSECT", trace1_flowline,
                             "", "NEW_SELECTION")

    # Trace2: trace downstream from the midpoints of flowlines that intersect
    # the selected flowlines from trace1.
    DM.TraceGeometricNetwork(network, "trace2", "non10vertices_lyr",
                             "TRACE_DOWNSTREAM")

    # Save trace2 flowlines and junctions to layers and then shapes on disk.
    DM.CopyFeatures("trace2\\HYDRO_NET_Junctions", trace2_junctions)
    DM.CopyFeatures("trace2\\NHDFlowline", trace2_flowline)  # extra for debugging
    arcpy.AddMessage("Done tracing.")

    # Classify seepage lakes (the ones that don't intersect any flowlines) as 'Isolated'.
    if exclude_intermit_flowlines:
        class_field_name = "Lake_Connectivity_Permanent"
    else:
        class_field_name = "Lake_Connectivity_Class"
    DM.AddField(temp_fc, class_field_name, "TEXT", field_length=13)
    DM.MakeFeatureLayer(temp_fc, "out_fc_lyr")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", nhdflowline,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", nhdflowline,
                             "", "SWITCH_SELECTION")
    DM.CalculateField("out_fc_lyr", class_field_name, """'Isolated'""", "PYTHON")

    # A second kind of "Isolated" classification, mostly for "permanent" but
    # with some oddballs in "maximum" too: lakes that touch both a start dangle
    # and an end dangle.
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", startdangles,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", enddangles,
                             XY_TOLERANCE, "SUBSET_SELECTION")
    DM.CalculateField("out_fc_lyr", class_field_name, """'Isolated'""", "PYTHON")

    # Get headwater lakes.
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", startdangles,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.SelectLayerByAttribute("out_fc_lyr", "REMOVE_FROM_SELECTION",
                              '''"{}" = 'Isolated' '''.format(class_field_name))
    DM.CalculateField("out_fc_lyr", class_field_name, """'Headwater'""", "PYTHON")

    # Select waterbodies that intersect trace2 junctions.
    arcpy.AddMessage("Beginning connectivity attribution...")
    DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", trace2_junctions,
                             XY_TOLERANCE, "NEW_SELECTION")
    DM.CalculateField("out_fc_lyr", class_field_name, """'DrainageLk'""", "PYTHON")

    # Get stream drainage lakes: either unassigned so far, or converted from
    # "Headwater" when a permanent stream flows into them, which is detected
    # with "non_artificial_end".
    DM.SelectLayerByAttribute("out_fc_lyr", "NEW_SELECTION",
                              '''"{}" IS NULL'''.format(class_field_name))
    DM.CalculateField("out_fc_lyr", class_field_name, """'Drainage'""", "PYTHON")
    if exclude_intermit_flowlines:
        DM.SelectLayerByAttribute("out_fc_lyr", "NEW_SELECTION",
                                  '''"{}" = 'Headwater' '''.format(class_field_name))
        DM.SelectLayerByLocation("out_fc_lyr", "INTERSECT", non_artificial_end,
                                 XY_TOLERANCE, "SUBSET_SELECTION")
        DM.CalculateField("out_fc_lyr", class_field_name, """'Drainage'""", "PYTHON")
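        # (At this point every lake carries one of four values: 'Isolated',
        # 'Headwater', 'Drainage', or 'DrainageLk'. In this permanent-only
        # pass, the checks below reconcile those values against the
        # all-flowlines classification saved in Lake_Connectivity_Class.)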
        # Prevent 'upgrades' due to very odd flow situations and artifacts of
        # bad digitization. The effects of these are varied; to avoid
        # confusion, just keep the class assigned with all flowlines.

        # 1--Purely hypothetical, not seen in testing.
        DM.SelectLayerByAttribute("out_fc_lyr", "NEW_SELECTION",
                                  '''"Lake_Connectivity_Class" = 'Isolated' AND "Lake_Connectivity_Permanent" <> 'Isolated' ''')
        DM.CalculateField("out_fc_lyr", class_field_name, """'Isolated'""", "PYTHON")

        # 2--Headwater to Drainage upgrade, seen in testing with an odd
        # multi-inlet flow situation.
        DM.SelectLayerByAttribute("out_fc_lyr", "NEW_SELECTION",
                                  '''"Lake_Connectivity_Class" = 'Headwater' AND "Lake_Connectivity_Permanent" IN ('Drainage', 'DrainageLk')''')
        DM.CalculateField("out_fc_lyr", class_field_name, """'Headwater'""", "PYTHON")

        # 3--Drainage to DrainageLk upgrade, seen in testing when intermittent
        # stream segments were used erroneously instead of artificial paths.
        DM.SelectLayerByAttribute("out_fc_lyr", "NEW_SELECTION",
                                  '''"Lake_Connectivity_Class" = 'Drainage' AND "Lake_Connectivity_Permanent" = 'DrainageLk' ''')
        DM.CalculateField("out_fc_lyr", class_field_name, """'Drainage'""", "PYTHON")
        DM.SelectLayerByAttribute("out_fc_lyr", "CLEAR_SELECTION")

        # Add a change flag for users.
        DM.AddField(temp_fc, "Lake_Connectivity_Fluctuates", "TEXT", field_length=1)
        flag_codeblock = """def flag_calculate(arg1, arg2):
            if arg1 == arg2:
                return 'N'
            else:
                return 'Y'"""
        expression = 'flag_calculate(!Lake_Connectivity_Class!, !Lake_Connectivity_Permanent!)'
        DM.CalculateField(temp_fc, "Lake_Connectivity_Fluctuates", expression,
                          "PYTHON", flag_codeblock)

    # Project the output once both passes are done; switching the CRS earlier
    # causes trace problems.
    if not exclude_intermit_flowlines:
        DM.CopyFeatures(temp_fc, out_feature_class)
    else:
        DM.Project(temp_fc, out_feature_class, arcpy.SpatialReference(102039))

    # Clean up.
    if not debug_mode:
        for item in this_tool_layers + this_tool_temp:
            if arcpy.Exists(item):
                DM.Delete(item)
        DM.Delete("trace1")
        DM.Delete("trace2")
    arcpy.AddMessage("{} classification is complete.".format(class_field_name))
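# A minimal usage sketch of the two-pass contract above (the paths are
# hypothetical, not part of the tool): the first pass uses all flowlines and
# writes Lake_Connectivity_Class; the second pass re-reads that same output,
# drops intermittent flowlines (FCodes 46003 and 46007), and adds
# Lake_Connectivity_Permanent plus the Lake_Connectivity_Fluctuates flag.
if __name__ == '__main__':
    nhd_gdb = r'C:\Data\NHDH0401.gdb'                   # hypothetical NHD subregion GDB
    out_fc = r'C:\Data\results.gdb\lakes_connectivity'  # hypothetical output
    classify_lakes(nhd_gdb, out_fc, exclude_intermit_flowlines=False)  # 'maximum' pass
    classify_lakes(nhd_gdb, out_fc, exclude_intermit_flowlines=True)   # 'permanent' pass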