def main():
    """Clip every input data network to each watershed's stream network,
    and also to the projectwide stream network.

    Relies on module-level globals (``root_folder``, ``data_networks_list_in``)
    and project helpers (``get_watershed_folders``, ``delete_old``,
    ``is_empty``, ``finish``) — assumed defined elsewhere in this file.
    """
    # Initialize variables and file locations
    arcpy.env.overwriteOutput = True
    watershed_folders = get_watershed_folders(root_folder)
    projectwide_output = os.path.join(root_folder, "00_ProjectWide",
                                      "Inputs", "Data_Networks")
    projectwide_network = os.path.join(root_folder, "00_ProjectWide",
                                       "Inputs", "Stream_Network",
                                       "Stream_Network.shp")
    delete_old(projectwide_output)

    total_count = len(data_networks_list_in)

    # Pair each data network with its base name (file name minus ".shp"),
    # then order alphabetically (case-insensitive) by that name.
    named_networks = [
        [path.split("\\")[-1].replace(".shp", ""), path]
        for path in data_networks_list_in
    ]
    sorted_networks = sorted(named_networks, key=lambda pair: pair[0].lower())

    # Clear old data from every watershed before writing new clips
    for watershed in watershed_folders:
        delete_old(os.path.join(watershed, "Inputs", "Data_Networks"))

    for done, (name, network) in enumerate(sorted_networks):
        arcpy.AddMessage("\nSaving {} Files ({}/{})...".format(
            name, done + 1, total_count))
        if '.shp' not in name:
            name += '.shp'

        for watershed in watershed_folders:
            arcpy.AddMessage("\tStarting " + watershed + "...")
            # Network to clip by
            old_stream_network = os.path.join(watershed, "Inputs",
                                              "Stream_Network",
                                              "Stream_Network.shp")
            # Clip the current data network to this watershed
            new_network_save = os.path.join(watershed, "Inputs",
                                            "Data_Networks", name)
            arcpy.Clip_analysis(network, old_stream_network, new_network_save)
            # Don't keep an empty shapefile
            if is_empty(new_network_save):
                arcpy.AddMessage("Did not save {}, as it was empty".format(
                    new_network_save))
                arcpy.Delete_management(new_network_save)

        arcpy.AddMessage("\tSaving Projectwide...")
        new_network_save = os.path.join(projectwide_output, name)
        arcpy.Clip_analysis(network, projectwide_network, new_network_save)

    finish()
def main():
    """Clip the projectwide Extraction_Merge_Points shapefile down to each
    watershed, using the watershed's boundary polygon.

    Uses module-level ``root_folder`` and helpers ``get_watershed_folders``
    / ``delete_old`` defined elsewhere in this file.
    """
    arcpy.env.overwriteOutput = True
    watershed_folders = get_watershed_folders(root_folder)
    projectwide = os.path.join(root_folder, "00_ProjectWide", "Outputs",
                               "Extracted_Data",
                               "Extraction_Merge_Points.shp")

    for watershed in watershed_folders:
        arcpy.AddMessage("\tStarting " + watershed + "...")
        boundary = os.path.join(watershed, "Inputs", "Watershed_Boundary",
                                "Watershed_Boundary.shp")
        # Clear the old output folder, then clip into it
        out_folder = os.path.join(watershed, "Outputs", "Extracted_Data")
        delete_old(out_folder)
        new_save_location = os.path.join(out_folder,
                                         "Extraction_Merge_Points.shp")
        arcpy.Clip_analysis(projectwide, boundary, new_save_location)
def main():
    """Create numerical-comparison plots for every watershed plus projectwide.

    Reads the per-watershed Numerical_Comparison_Data.csv and the projectwide
    Outliers.csv, then delegates plotting to ``create_plots``. Uses
    module-level ``root_folder`` / ``input_field_csv`` and project helpers.
    """
    arcpy.env.overwriteOutput = True
    watershed_folders = get_watershed_folders(root_folder)
    # Projectwide is treated like one more "watershed" folder here
    watershed_folders.insert(0, os.path.join(root_folder, "00_Projectwide"))

    for watershed in watershed_folders:
        arcpy.AddMessage("Working on {}...".format(watershed))
        in_folder = os.path.join(watershed, "Outputs", "Comparisons",
                                 "Numerical")
        out_folder = make_folder(
            os.path.join(watershed, "Outputs", "Comparisons", "Numerical"),
            "Plots")
        # Clear any previously generated plot images
        delete_old(in_folder, '.png')
        delete_old(out_folder, '.png')

        data_csv = os.path.join(in_folder, "Numerical_Comparison_Data.csv")
        outliers_csv = os.path.join(root_folder, "00_Projectwide", "Outputs",
                                    "Comparisons", "Numerical", "Outliers.csv")
        outlier_fields, outlier_reaches_list = read_outliers_csv(outliers_csv)
        (pnet_names, pnet_fields, field_names, field_db_fields,
         new_fields_initial, pnet_valid, field_valid) = \
            read_field_csv(input_field_csv)

        # Plot Data
        create_plots(pnet_names, pnet_fields, field_names, field_db_fields,
                     new_fields_initial, out_folder, data_csv, pnet_valid,
                     field_valid, outlier_fields, outlier_reaches_list)
def main():
    """Build per-watershed "Numerical_Comparison" point shapefiles/CSVs that
    pair PNET-extracted values with field-database values, then merge them
    projectwide.

    For each watershed: reads All_Data.csv (PNET) and the field database CSV,
    matches rows by RchID, writes paired "pn_*"/"fd_*" columns into a new
    point shapefile (geometry borrowed from Extraction_Merge_Points.shp),
    and exports a CSV. Uses module-level globals (``root_folder``,
    ``field_db``, ``input_field_csv``) and project helpers.

    Fix: ``field_row`` is now reset per PNET row; previously a PNET site with
    no match in the field database silently reused the previous row's field
    data (or raised NameError on the first row). Unmatched sites are skipped
    with a message.
    """
    arcpy.env.overwriteOutput = True
    watershed_folders = get_watershed_folders(root_folder)

    # Setup projectwide data
    projectwide_output = make_folder(
        os.path.join(root_folder, "00_ProjectWide", "Outputs", "Comparisons"),
        "Numerical")
    save_db(field_db, os.path.join(root_folder, "00_ProjectWide"))
    delete_old(projectwide_output)

    # PNET columns carried through to the output unchanged
    keep_fields = [
        "FID", "Shape", "POINT_X", "POINT_Y", "SnapDist", "FldRchLen",
        "EcoRgn_L4", "EcoRgn_L3", "HUC8", "NAME", "StreamName", "PRECIP",
        "DRAREA", "iGeo_ElMax", "iGeo_ElMin"
    ]
    to_merge = []

    # Set the field lists to the values from the fields file
    pnet_fields, field_db_fields, new_fields_initial = read_field_csv(
        input_field_csv)

    for watershed in watershed_folders:
        old_pnet_fields = pnet_fields
        old_field_db_fields = field_db_fields
        old_new_fields_initial = new_fields_initial
        arcpy.AddMessage("Working on {}...".format(watershed))
        arcpy.AddMessage("\t Combining Data...")

        # Setup watershed data
        watershed_output = make_folder(
            os.path.join(watershed, "Outputs", "Comparisons"), "Numerical")
        delete_old(watershed_output)

        # Get the CSV with extracted PNET data
        watershed_pnet = os.path.join(watershed, "Outputs", "Extracted_Data",
                                      "All_Data.csv")
        pnet_data_list = csv_to_list(watershed_pnet)

        # Find certain PNET indexes in the PNET output; remember which
        # configured fields are absent so the field-DB side can skip them too
        id_pnet = pnet_data_list[0].index("RchID")
        pnet_indexes = []
        missing_field_indexes = []
        for pnet_field in old_pnet_fields:
            if pnet_field in pnet_data_list[0]:
                pnet_indexes.append(pnet_data_list[0].index(pnet_field))
            else:
                missing_field_indexes.append(old_pnet_fields.index(pnet_field))
        # remove headers
        pnet_data_list.pop(0)

        # Create a list with only necessary data: [RchID, field values...]
        pnet_compare_list = []
        for row in pnet_data_list:
            to_add = [row[id_pnet]]
            for index in pnet_indexes:
                to_add.append(row[index])
            pnet_compare_list.append(to_add)

        # Get the CSV with Field data
        watershed_db = save_db(field_db, watershed)
        field_data_list = csv_to_list(watershed_db)

        # Find certain field indexes in the field database, skipping fields
        # whose PNET counterpart was missing
        id_field_db = field_data_list[0].index("RchID")
        field_indexes_db = []
        for field_db_field in old_field_db_fields:
            if old_field_db_fields.index(field_db_field) \
                    not in missing_field_indexes:
                field_indexes_db.append(
                    field_data_list[0].index(field_db_field))
        # remove headers
        field_data_list.pop(0)

        # Create a list with only necessary data: [RchID, field values...]
        field_compare_list = []
        for row in field_data_list:
            to_add = [row[id_field_db]]
            for index in field_indexes_db:
                to_add.append(row[index])
            field_compare_list.append(to_add)

        # Make list of new (paired) output field names. Shapefile fields are
        # limited to 10 chars, hence the 7-char truncation after the prefix.
        new_fields = ["RchID"]
        for new_field in old_new_fields_initial:
            if old_new_fields_initial.index(new_field) \
                    not in missing_field_indexes:
                # This is where PNET data will go
                new_fields.append("pn_" + new_field[:7])
                # This is where field database data will go
                new_fields.append("fd_" + new_field[:7])

        # Perform data comparisons
        both_compare_list = [new_fields]
        arcpy.AddMessage("\t Creating New Fields...")
        for pnet_row in pnet_compare_list:
            current_site = pnet_row[0]
            # Find the corresponding row in the field data list
            field_row = None
            for db_row in field_compare_list:
                if db_row[0] == current_site:
                    field_row = db_row
                    break
            if field_row is None:
                # No field-database entry for this reach; skip it rather than
                # silently reusing a previous row's data (old bug)
                arcpy.AddMessage(
                    "\t\t No field data for site {}, skipping".format(
                        current_site))
                continue

            new_row = [pnet_row[0]]
            # Iterate through each column of data, skipping RchID
            pnet_iter = iter(pnet_row)
            field_iter = iter(field_row)
            next(pnet_iter)
            next(field_iter)
            for pnet_data, field_data in zip(pnet_iter, field_iter):
                # Make sure that the data is not missing
                if pnet_data != "" and field_data != "":
                    new_row.append(float(pnet_data))
                    new_row.append(float(field_data))
                else:
                    # -999 is the missing-data sentinel
                    new_row += [-999, -999]
            both_compare_list.append(new_row)

        # Prepend data for each of the kept PNET fields to every row
        pnet_data_list = csv_to_list(watershed_pnet)
        for row_num, row in enumerate(both_compare_list):
            data_to_add = []
            for add_field in keep_fields:
                if add_field in pnet_data_list[0]:
                    this_index = pnet_data_list[0].index(add_field)
                    data_to_add.append(pnet_data_list[row_num][this_index])
            both_compare_list[row_num] = data_to_add + row

        # Create a new shapefile to hold data
        template = os.path.join(watershed, "Outputs", "Extracted_Data",
                                "Extraction_Merge_Points.shp")
        comparison_points = arcpy.CreateFeatureclass_management(
            watershed_output, "Numerical_Comparison_Points.shp", "POINT",
            spatial_reference=template)

        # Add in new fields to the shapefile
        for count, (field, example) in enumerate(
                zip(both_compare_list[0], both_compare_list[1])):
            arcpy.AddMessage("\t\t Adding Field {} ({}/{})...".format(
                field, count + 1, len(both_compare_list[0])))
            # Make sure we are not adding any already existing default fields
            shapefile_fields = get_fields(comparison_points)
            if field not in shapefile_fields:
                # Text for string examples, float otherwise
                if isinstance(example, str):
                    arcpy.AddField_management(comparison_points, field, "TEXT")
                else:
                    arcpy.AddField_management(comparison_points, field,
                                              "FLOAT")
            elif count > 2:
                arcpy.AddMessage(
                    "\t\t\t Reminder: All new name fields need to be unique "
                    "within the first 7 characters")

        # Skip headers
        iter_list = iter(both_compare_list)
        next(iter_list)
        # remove useless field
        arcpy.DeleteField_management(comparison_points, "Id")

        arcpy.AddMessage("\t Creating Shapefile...")
        # Add data to the shapefile, borrowing geometry from the template
        with arcpy.da.InsertCursor(comparison_points, '*') as inserter:
            with arcpy.da.SearchCursor(template, '*') as searcher:
                for row, search_row in zip(iter_list, searcher):
                    row[1] = search_row[1]
                    inserter.insertRow(row)
        to_merge.append(comparison_points)

        # Save as CSV
        create_csv(
            os.path.join(watershed_output, "Numerical_Comparison_Data.csv"),
            comparison_points)

    arcpy.AddMessage('Saving ProjectWide...')
    merged = arcpy.Merge_management(
        to_merge,
        os.path.join(projectwide_output, "Numerical_Comparison_Points.shp"))
    create_csv(
        os.path.join(projectwide_output, "Numerical_Comparison_Data.csv"),
        merged)
def main():
    """Join every extracted data-network point file within a watershed into
    one Extraction_Merge_Points shapefile (joined on RchID), export a CSV,
    and merge all watersheds projectwide.

    Uses module-level ``root_folder`` and project helpers
    (``get_watershed_folders``, ``get_data_points``, ``get_fields``,
    ``remove_existing_fields``, ``create_csv``, ``delete_old``, ``finish``).

    Fix: ``data_temp`` is now initialized per watershed and only deleted if
    it was actually created. Previously, a watershed whose ``point_list``
    had a single entry raised NameError at the cleanup step (or deleted a
    stale path left over from the previous watershed).
    """
    arcpy.env.overwriteOutput = True
    watershed_folders = get_watershed_folders(root_folder)
    projectwide_output = os.path.join(root_folder, "00_ProjectWide",
                                      "Outputs", "Extracted_Data")
    delete_old(projectwide_output)
    to_merge_points = []
    # Fields that must never be joined twice
    req_fields = ["RchID", "FID", "Shape"]

    # This loops for each watershed folder
    for watershed in watershed_folders:
        arcpy.AddMessage("Working on {}...".format(watershed))

        # Initialize list of all unique data networks within this watershed
        point_list = get_data_points(watershed)
        output_folder = os.path.join(watershed, "Outputs", "Extracted_Data")
        delete_old(output_folder)

        # Temporary shapefiles to store spatially joined data
        all_joined = os.path.join(output_folder, "temp.shp")
        data_temp = None  # only created if there is more than one network

        # Seed the joined shapefile with the first network's points
        arcpy.AddMessage("\t Merging first points...")
        arcpy.Copy_management(point_list[0], all_joined)
        all_fields = get_fields(all_joined)
        for field in req_fields:
            if field in all_fields:
                all_fields.remove(field)
        point_list.pop(0)

        # Check to make sure there are still networks to join
        if len(point_list) > 0:
            # This repeats for each of the remaining networks
            for data in point_list:
                arcpy.AddMessage("\t\tMerging more points...")
                data_temp = os.path.join(output_folder, "data_temp.shp")
                arcpy.Copy_management(data, data_temp)
                data = data_temp
                # Drop fields already present in the accumulated join
                remove_existing_fields(all_fields, data)

                # Join the current network onto the accumulated shapefile
                arcpy.JoinField_management(all_joined, "RchID", data, "RchID")
                arcpy.DeleteField_management(all_joined, "RchID_1")

                all_fields = get_fields(all_joined)
                for field in req_fields:
                    if field in all_fields:
                        all_fields.remove(field)

        # Save the output into the correct folder
        save = arcpy.Copy_management(
            all_joined,
            os.path.join(output_folder, "Extraction_Merge_Points.shp"))
        to_merge_points.append(save)
        create_csv(os.path.join(output_folder, "All_Data.csv"), save)

        # Delete both temp shapefiles (data_temp only exists when >1 network)
        arcpy.Delete_management(all_joined)
        if data_temp is not None:
            arcpy.Delete_management(data_temp)

    arcpy.AddMessage("Working on Projectwide...")
    make_csv = arcpy.Merge_management(
        to_merge_points,
        os.path.join(projectwide_output, "Extraction_Merge_Points.shp"))
    create_csv(os.path.join(projectwide_output, "All_Data.csv"), make_csv)
    finish()
def main():
    """Prepare reach-editing inputs for every watershed: merge TOR/BOR
    points (optionally including user-fixed unsnapped points), dissolve and
    split the stream network at those points, and save the intersecting
    segments, then build projectwide copies.

    Uses module-level globals (``root_folder``, ``fixed_points``) and
    project helpers (``get_watershed_folders``, ``delete_old``,
    ``save_fixed_points``, ``is_empty``, ``make_projectwide``,
    ``delete_temps``, ``finish``).
    """
    arcpy.env.overwriteOutput = True
    watershed_folders = get_watershed_folders(root_folder)
    delete_old(os.path.join(root_folder, "00_ProjectWide", "Intermediates",
                            "Reach_Editing", "Inputs"))
    project_networks = []
    project_points = []
    temps_to_delete = []

    if fixed_points:
        network = os.path.join(root_folder, "00_ProjectWide", "Inputs",
                               "Stream_Network", "Stream_Network.shp")
        fixed_folder = os.path.join(root_folder, "00_ProjectWide",
                                    "Intermediates", "Points",
                                    "Unsnapped_Fixed")
        save_fixed_points(network, fixed_folder, watershed_folders)

    for watershed_folder in watershed_folders:
        arcpy.AddMessage("Starting {}...".format(watershed_folder))

        # File locations for this watershed
        output_folder = os.path.join(watershed_folder, "Intermediates",
                                     "Reach_Editing", "Inputs")
        network = os.path.join(watershed_folder, "Inputs", "Stream_Network",
                               "Stream_Network.shp")
        delete_old(output_folder)

        new_tor_points = os.path.join(watershed_folder, "temp_tor.shp")
        temps_to_delete.append(new_tor_points)
        new_bor_points = os.path.join(watershed_folder, "temp_bor.shp")
        temps_to_delete.append(new_bor_points)

        snapped = os.path.join(watershed_folder, "Intermediates", "Points",
                               "Snapped")
        old_tor_points = os.path.join(snapped, "TOR_Points_Snapped.shp")
        old_bor_points = os.path.join(snapped, "BOR_Points_Snapped.shp")

        if fixed_points:
            # Merge the now-fixed points with the snapped points and use the
            # merged set going forward (separately for TOR and BOR)
            fixed_dir = os.path.join(watershed_folder, "Intermediates",
                                     "Points", "Unsnapped_Fixed")

            tor_temp_merge = os.path.join(watershed_folder,
                                          "temp_tor_merge.shp")
            tor_fixed = os.path.join(fixed_dir, "TOR_Points_Fixed.shp")
            if not is_empty(tor_fixed):
                arcpy.Merge_management([tor_fixed, old_tor_points],
                                       tor_temp_merge)
                temps_to_delete.append(tor_temp_merge)
                old_tor_points = tor_temp_merge

            bor_temp_merge = os.path.join(watershed_folder,
                                          "temp_bor_merge.shp")
            bor_fixed = os.path.join(fixed_dir, "BOR_Points_Fixed.shp")
            if not is_empty(bor_fixed):
                arcpy.Merge_management([bor_fixed, old_bor_points],
                                       bor_temp_merge)
                temps_to_delete.append(bor_temp_merge)
                old_bor_points = bor_temp_merge

        arcpy.CopyFeatures_management(old_tor_points, new_tor_points)
        arcpy.CopyFeatures_management(old_bor_points, new_bor_points)

        points_list = [new_tor_points, new_bor_points]
        tor_bor_list = ("\"TOR\"", "\"BOR\"")

        # Once for TOR points, once for BOR points: tag each with its type
        for points, tor_bor in zip(points_list, tor_bor_list):
            arcpy.AddField_management(points, "TOR_BOR", "TEXT")
            arcpy.CalculateField_management(points, "TOR_BOR", tor_bor)

        # Merge TOR and BOR points into one file (plus an editable copy)
        merge_location = os.path.join(watershed_folder, "Intermediates",
                                      "Reach_Editing", "Inputs",
                                      "Points_Merge.shp")
        merge_edit_location = os.path.join(watershed_folder, "Intermediates",
                                           "Reach_Editing", "Outputs",
                                           "Points_Merge_To_Edit.shp")
        arcpy.Merge_management(points_list, merge_location)
        arcpy.Merge_management(points_list, merge_edit_location)
        project_points.append(merge_location)

        # Dissolve the network, then split it at the merged points
        new_network = os.path.join(watershed_folder, "temp_network.shp")
        temps_to_delete.append(new_network)
        arcpy.Dissolve_management(network, new_network)
        network = new_network

        new_network = os.path.join(watershed_folder, "temp_network2.shp")
        temps_to_delete.append(new_network)
        arcpy.SplitLineAtPoint_management(network, merge_location,
                                          new_network, "10 METERS")
        network = new_network

        # Keep only segments that intersect the field points
        network_layer = "Network"
        arcpy.MakeFeatureLayer_management(network, network_layer)
        arcpy.SelectLayerByLocation_management(network_layer, 'INTERSECT',
                                               merge_location)

        save_location = os.path.join(watershed_folder, "Intermediates",
                                     "Reach_Editing", "Inputs",
                                     "Stream_Network_Segments.shp")
        edit_location = os.path.join(watershed_folder, "Intermediates",
                                     "Reach_Editing", "Outputs",
                                     "Stream_Network_Segments_To_Edit.shp")
        arcpy.CopyFeatures_management(network_layer, save_location)
        arcpy.CopyFeatures_management(network_layer, edit_location)
        project_networks.append(save_location)

    arcpy.AddMessage("Saving ProjectWide...")
    make_projectwide(root_folder, project_points, project_networks)

    delete_temps(temps_to_delete)
    finish()
def main():
    """Extract each data network's values onto the PIBO field reaches for
    every watershed, then merge the per-watershed results projectwide.

    Uses module-level ``root_folder`` and project helpers
    (``get_watershed_folders``, ``get_data_networks``, ``is_empty``,
    ``make_folder``, ``delete_old``, ``get_fields``, ``extract_network``,
    ``keep_fields``, ``delete_temps``, ``create_csv``, ``finish``).
    """
    arcpy.env.overwriteOutput = True
    watershed_folders = get_watershed_folders(root_folder)
    projectwide_output = os.path.join(root_folder, "00_ProjectWide",
                                      "Intermediates", "Extraction",
                                      "Outputs")
    delete_old(projectwide_output)

    # One empty slot per data-network type, filled in per watershed below
    to_merge = [[] for _ in get_data_networks(watershed_folders[0])]
    to_delete = []

    # This loops for every watershed
    for watershed_folder in watershed_folders:
        output_folder = os.path.join(watershed_folder, "Intermediates",
                                     "Extraction", "Outputs")
        network_list = get_data_networks(watershed_folder)
        arcpy.AddMessage("Starting " + watershed_folder + "...")

        for data_network_count, data_network in enumerate(network_list):
            if not is_empty(data_network):
                old_reaches = os.path.join(watershed_folder, "Intermediates",
                                           "Extraction", "Inputs",
                                           "Field_Reaches_Clean.shp")

                # Derive a unique name for this data network's save location
                data_network_name = data_network.replace(
                    os.path.join(watershed_folder, "Inputs",
                                 "Data_Networks"), "")
                data_network_name = data_network_name.replace(".shp", "")
                data_network_name = data_network_name.replace("\\", "")
                arcpy.AddMessage("\tStarting {}...".format(data_network_name))

                data_network_folder = make_folder(output_folder,
                                                  data_network_name)
                delete_old(data_network_folder)

                reaches = os.path.join(data_network_folder,
                                       "Reaches_Temp.shp")
                to_delete.append(reaches)
                reaches_save = os.path.join(
                    data_network_folder,
                    data_network_name + "_Points_Extracted.shp")
                arcpy.CopyFeatures_management(old_reaches, reaches)

                # Clip the data network to the field reaches; this is
                # important for calculating the math later
                clip = os.path.join(data_network_folder, "Clip_Temp.shp")
                to_delete.append(clip)
                arcpy.Clip_analysis(data_network, reaches, clip)
                clipped_data_network = clip

                # CLIP_LEN = length (meters) of the clipped data segment
                arcpy.AddField_management(clipped_data_network, "CLIP_LEN",
                                          "DOUBLE")
                arcpy.CalculateField_management(clipped_data_network,
                                                "CLIP_LEN",
                                                "!shape.length@meters!",
                                                "PYTHON_9.3", "")

                # These fields are unnecessary and cause issues with merging
                field_names = [f.name for f in
                               arcpy.ListFields(clipped_data_network)]
                for field in ["Join_Count", "TARGET_FID", "Join_Cou_1"]:
                    if field in field_names:
                        arcpy.DeleteField_management(clipped_data_network,
                                                     field)

                data_network_fields = get_fields(data_network)
                pnet_fields = get_fields(old_reaches)
                fields_to_keep = pnet_fields + data_network_fields

                # Extract data from the data network onto the PIBO reaches
                # using a weighted-average system
                extract_network(reaches, clipped_data_network, reaches_save,
                                data_network_folder, pnet_fields)

                # Remove all unnecessary fields
                keep_fields(reaches_save, fields_to_keep)

                # Delete any temporary shapefiles created
                delete_temps(to_delete)
                create_csv(
                    os.path.join(data_network_folder,
                                 "{}.csv".format(data_network_name)),
                    reaches_save)
                to_merge[data_network_count].append(
                    [reaches_save, data_network_name])

    # Save a merged point shapefile and CSV for each data-network type.
    # NOTE(review): assumes every network type produced output in at least
    # one watershed, otherwise data_network_type[0] would fail — confirm.
    arcpy.AddMessage("Saving Projectwide...")
    for data_network_type in to_merge:
        save_name = data_network_type[0][1]
        save_folder = make_folder(projectwide_output, save_name)
        to_merge_networks = [entry[0] for entry in data_network_type]
        csv_save = arcpy.Merge_management(
            to_merge_networks,
            os.path.join(save_folder, save_name + "_Points_Extracted.shp"))
        create_csv(os.path.join(save_folder, "{}.csv".format(save_name)),
                   csv_save)
    finish()
def main():
    """Build categorical-comparison point shapefiles, CSVs, and plots for
    every configured graph, per watershed and projectwide.

    For each graph definition from ``read_field_csv_new``: matches PNET rows
    (All_Data.csv) to the field database by RchID, writes grouping columns
    plus "Y_*" field-data columns into a point shapefile (geometry from
    Extraction_Merge_Points.shp), then produces plots per metagroup (or one
    set when no metagroup is configured). Uses module-level globals
    (``root_folder``, ``input_field_csv``) and project helpers.

    Fixes: (1) ``new_row += 0`` raised TypeError (list += int) whenever a
    field value was empty — now appends the 0 placeholder; (2) ``field_row``
    is reset per PNET row so an unmatched site no longer silently reuses the
    previous row's field data — unmatched sites are skipped with a message.
    """
    arcpy.env.overwriteOutput = True
    watershed_folders = get_watershed_folders(root_folder)

    # Setup projectwide data
    projectwide_output = make_folder(
        os.path.join(root_folder, "00_ProjectWide", "Outputs", "Comparisons"),
        "Categorical")
    projectwide_database = os.path.join(root_folder, "00_ProjectWide",
                                        "Inputs", "Database",
                                        "Field_Database.csv")
    delete_old(projectwide_output)

    # PNET columns carried through to the output unchanged
    keep_fields = [
        "FID", "Shape", "POINT_X", "POINT_Y", "SnapDist", "FldRchLen",
        "EcoRgn_L4", "EcoRgn_L3", "HUC8", "NAME", "StreamName", "PRECIP",
        "DRAREA", "iGeo_ElMax", "iGeo_ElMin"
    ]

    # Each graph: [meta_group_field, meta_group_field_name, group_field,
    #             group_field_name, field_db_fields]
    graphs = read_field_csv_new(input_field_csv)
    for graph in graphs:
        to_merge = []
        meta_group_field = graph[0]
        meta_group_field_name = graph[1]
        group_field = graph[2]
        group_field_name = graph[3]
        field_db_fields = graph[4]
        arcpy.AddMessage("Graphing {}...".format(group_field_name))

        # A metagroup is only used when both the field and name are set
        meta_exists = bool(meta_group_field and group_field_name)

        for watershed in watershed_folders:
            arcpy.AddMessage("\tWorking on {}...".format(watershed))

            # Setup watershed data
            watershed_output = make_folder(
                os.path.join(watershed, "Outputs", "Comparisons"),
                "Categorical")
            delete_old(watershed_output)

            # Get the CSV with Field data (projectwide DB is used everywhere)
            watershed_db = projectwide_database
            field_data_list = csv_to_list(watershed_db)

            # Find certain field indexes in the field database
            id_field_db = field_data_list[0].index("RchID")
            field_indexes_db = []
            for field_db_field in field_db_fields:
                field_indexes_db.append(
                    field_data_list[0].index(field_db_field))
            # remove headers
            field_data_list.pop(0)

            # Create a list with only necessary data
            field_compare_list = []
            for row in field_data_list:
                to_add = [row[id_field_db]]
                for index in field_indexes_db:
                    to_add.append(row[index])
                field_compare_list.append(to_add)

            # Get the CSV with extracted PNET data
            watershed_pnet = os.path.join(watershed, "Outputs",
                                          "Extracted_Data", "All_Data.csv")
            pnet_data_list = csv_to_list(watershed_pnet)
            id_pnet = pnet_data_list[0].index("RchID")

            if group_field not in pnet_data_list[0]:
                arcpy.AddMessage(
                    "Could not complete plots for {}, could not find {} field".
                    format(watershed, group_field))
            elif meta_exists and meta_group_field not in pnet_data_list[0]:
                arcpy.AddMessage(
                    "Could not complete plots for {}, could not find {} field".
                    format(watershed, meta_group_field))
            else:
                group_pnet = pnet_data_list[0].index(group_field)
                if meta_exists:
                    meta_group_pnet = pnet_data_list[0].index(meta_group_field)
                # remove headers
                pnet_data_list.pop(0)

                # Create a list with only necessary data
                pnet_compare_list = []
                for row in pnet_data_list:
                    to_add = [row[id_pnet]]
                    if meta_exists:
                        to_add.append(row[meta_group_pnet])
                    to_add.append(row[group_pnet])
                    pnet_compare_list.append(to_add)

                # Make list of new fields ("Y_" prefix marks field-DB data)
                if meta_exists:
                    new_fields = ["RchID", meta_group_field_name,
                                  group_field_name]
                else:
                    new_fields = ["RchID", group_field_name]
                for new_field in field_db_fields:
                    new_fields.append("Y_" + new_field[:8])

                # Perform data comparisons
                both_compare_list = [new_fields]
                for pnet_row in pnet_compare_list:
                    current_site = pnet_row[0]
                    # Find the corresponding row in the field data list
                    field_row = None
                    for db_row in field_compare_list:
                        if db_row[0] == current_site:
                            field_row = db_row
                            break
                    if field_row is None:
                        # No field-database entry for this reach; skip it
                        # rather than reusing stale data (old bug)
                        arcpy.AddMessage(
                            "\t\t No field data for site {}, skipping".format(
                                current_site))
                        continue

                    new_row = [pnet_row[0], pnet_row[1]]
                    if meta_exists:
                        # Row layout is [id, metagroup, group]
                        new_row.append(pnet_row[2])

                    # Iterate through each column of data, skipping RchID
                    field_iter = iter(field_row)
                    next(field_iter)
                    for field_data in field_iter:
                        if field_data != "":
                            new_row.append(float(field_data))
                        else:
                            # 0 placeholder for missing data
                            # (was `new_row += 0`, a TypeError)
                            new_row.append(0)
                    both_compare_list.append(new_row)

                # Prepend data from the kept PNET fields to every row
                pnet_data_list = csv_to_list(watershed_pnet)
                for row_num, row in enumerate(both_compare_list):
                    data_to_add = []
                    for add_field in keep_fields:
                        if add_field in pnet_data_list[0]:
                            this_index = pnet_data_list[0].index(add_field)
                            data_to_add.append(
                                pnet_data_list[row_num][this_index])
                    both_compare_list[row_num] = data_to_add + row

                # Create a new shapefile to hold data
                template = os.path.join(watershed, "Outputs",
                                        "Extracted_Data",
                                        "Extraction_Merge_Points.shp")
                comparison_points = arcpy.CreateFeatureclass_management(
                    watershed_output, "Categorical_Comparison_Points.shp",
                    "POINT", spatial_reference=template)
                to_merge.append(comparison_points)

                # Add in new fields to the shapefile (names capped at the
                # 10-char shapefile limit)
                for field, example in zip(both_compare_list[0],
                                          both_compare_list[1]):
                    shapefile_fields = get_fields(comparison_points)
                    if field not in shapefile_fields:
                        if isinstance(example, str):
                            arcpy.AddField_management(comparison_points,
                                                      field[:10], "TEXT")
                        else:
                            arcpy.AddField_management(comparison_points,
                                                      field[:10], "FLOAT")

                # Skip headers
                iter_list = iter(both_compare_list)
                next(iter_list)
                # remove useless field
                arcpy.DeleteField_management(comparison_points, "Id")

                # Add data to the shapefile; steal Shape and FID from the
                # template
                with arcpy.da.InsertCursor(comparison_points, '*') as \
                        inserter:
                    with arcpy.da.SearchCursor(template, '*') as searcher:
                        for row, search_row in zip(iter_list, searcher):
                            row[0] = search_row[0]
                            row[1] = search_row[1]
                            inserter.insertRow(row)

                # Save as CSV
                create_csv(
                    os.path.join(watershed_output,
                                 "Categorical_Comparison_Data.csv"),
                    comparison_points)

                if meta_exists:
                    # One folder/shapefile/plot set per metagroup value
                    metagroup_types = unique_values(
                        comparison_points, meta_group_field_name[:10])
                    if " " in metagroup_types:
                        metagroup_types.remove(" ")
                    for metagroup in metagroup_types:
                        plot_folder = make_folder(
                            watershed_output,
                            "{}_{}".format(meta_group_field_name.title(),
                                           metagroup.title()))
                        delete_old(plot_folder)
                        layer_name = 'temp'
                        new_shapefile = os.path.join(
                            plot_folder,
                            '{}_{}_Comparison.shp'.format(
                                meta_group_field_name.title(),
                                metagroup.title()))
                        arcpy.MakeFeatureLayer_management(comparison_points,
                                                          layer_name)
                        query = '{} = \'{}\''.format(
                            meta_group_field_name[:10], metagroup)
                        arcpy.SelectLayerByAttribute_management(
                            layer_name, 'NEW_SELECTION', query)
                        arcpy.CopyFeatures_management(layer_name,
                                                      new_shapefile)
                        create_plots(new_shapefile, group_field_name,
                                     field_db_fields, plot_folder, metagroup,
                                     meta_group_field_name)
                else:
                    plot_folder = make_folder(
                        watershed_output,
                        "{}".format(group_field_name.title()))
                    delete_old(plot_folder)
                    layer_name = 'temp'
                    new_shapefile = os.path.join(
                        plot_folder,
                        '{}_Comparison.shp'.format(group_field_name.title()))
                    arcpy.MakeFeatureLayer_management(comparison_points,
                                                      layer_name)
                    arcpy.CopyFeatures_management(layer_name, new_shapefile)
                    create_plots(new_shapefile, group_field_name,
                                 field_db_fields, plot_folder)
                    arcpy.Delete_management(new_shapefile)

        # Do projectwide
        arcpy.AddMessage('\tSaving ProjectWide...')
        save_loc = os.path.join(
            projectwide_output,
            "Categorical_Comparison_Points_{}.shp".format(group_field_name))
        merged = arcpy.Merge_management(to_merge, save_loc)
        create_csv(
            os.path.join(projectwide_output,
                         "Categorical_Comparison_Data.csv"),
            merged)

        if meta_exists:
            metagroup_types = unique_values(merged,
                                            meta_group_field_name[:10])
            if " " in metagroup_types:
                metagroup_types.remove(" ")
            for metagroup in metagroup_types:
                plot_folder = make_folder(
                    projectwide_output,
                    "{}_{}".format(meta_group_field_name.title(),
                                   metagroup.title()))
                delete_old(plot_folder)
                layer_name = 'temp'
                new_shapefile = os.path.join(
                    plot_folder,
                    '{}_{}_Comparison.shp'.format(
                        meta_group_field_name.title(), metagroup.title()))
                arcpy.MakeFeatureLayer_management(merged, layer_name)
                query = '{} = \'{}\''.format(meta_group_field_name[:10],
                                             metagroup)
                arcpy.SelectLayerByAttribute_management(
                    layer_name, 'NEW_SELECTION', query)
                arcpy.CopyFeatures_management(layer_name, new_shapefile)
                create_plots(new_shapefile, group_field_name,
                             field_db_fields, plot_folder, metagroup,
                             meta_group_field_name)
        else:
            plot_folder = make_folder(
                projectwide_output,
                "{}".format(group_field_name.title()))
            delete_old(plot_folder)
            layer_name = 'temp'
            new_shapefile = os.path.join(
                plot_folder,
                '{}_Comparison.shp'.format(group_field_name.title()))
            arcpy.MakeFeatureLayer_management(merged, layer_name)
            arcpy.CopyFeatures_management(layer_name, new_shapefile)
            create_plots(new_shapefile, group_field_name, field_db_fields,
                         plot_folder)
            arcpy.Delete_management(new_shapefile)
        arcpy.Delete_management(merged)
def main():
    """Trim segmented stream networks down to surveyed field reaches.

    For every watershed, spatially joins the stream-network segments with
    the merged TOR/BOR points, drops segments that fail delete_segment(),
    and saves the surviving reaches and their points. Per-watershed
    results are merged into the 00_ProjectWide outputs at the end.

    Relies on module-level helpers (get_watershed_folders, delete_old,
    remove_fields, attribute_table_to_list, get_field_index,
    split_list_by_id, delete_segment, delete_temps, finish) and the
    module-level root_folder.
    """
    # Initialize variables
    arcpy.env.overwriteOutput = True
    watershed_folders = get_watershed_folders(root_folder)
    delete_old(
        os.path.join(root_folder, "00_ProjectWide", "Intermediates",
                     "Reach_Editing", "Outputs"))
    temps_to_delete = []
    to_merge_points = []
    to_merge_reaches = []
    for watershed in watershed_folders:
        arcpy.AddMessage("Starting {}...".format(watershed))
        # Get file names
        input_folder = os.path.join(watershed, "Intermediates",
                                    "Reach_Editing", "Inputs")
        output_folder = os.path.join(watershed, "Intermediates",
                                     "Reach_Editing", "Outputs")
        delete_old(output_folder)
        stream_seg = os.path.join(input_folder, "Stream_Network_Segments.shp")
        points = os.path.join(input_folder, "Points_Merge.shp")
        stream_seg_copy = os.path.join(
            input_folder, "Stream_Network_Segments_To_Edit_Temp.shp")
        points_copy = os.path.join(input_folder,
                                   "Points_Merge_To_Edit_Temp.shp")
        temps_to_delete.append(stream_seg_copy)
        temps_to_delete.append(points_copy)
        # Work on copies so the original inputs are left untouched
        arcpy.Copy_management(stream_seg, stream_seg_copy)
        arcpy.Copy_management(points, points_copy)
        stream_seg = stream_seg_copy
        points = points_copy
        # Spatially join stream network segments by points
        fields_to_remove = ["TARGET_FID", "JOIN_FID", "Join_Count"]
        spatial_joined = os.path.join(input_folder, "Spatial_Joined_Temp.shp")
        temps_to_delete.append(spatial_joined)
        # Strip leftover join fields so the SpatialJoin below starts clean
        remove_fields(fields_to_remove, stream_seg)
        arcpy.SpatialJoin_analysis(stream_seg, points, spatial_joined,
                                   "JOIN_ONE_TO_MANY")
        # Get an attribute table list of the joined shapefile to analyze
        stream_seg_list = attribute_table_to_list(spatial_joined)
        reach_id_index = get_field_index("TARGET_FID", spatial_joined)
        point_id_index = get_field_index("SiteID", spatial_joined)
        tor_bor_index = get_field_index("TOR_BOR", spatial_joined)
        new_list = split_list_by_id(stream_seg_list, reach_id_index)
        to_delete_list = []
        keep_points_list = []
        # Check which segments we need to delete
        for segment in new_list:
            if delete_segment(segment, point_id_index, tor_bor_index,
                              reach_id_index):
                to_delete_list.append(segment[0][reach_id_index])
            else:
                keep_points_list.append(segment[0][point_id_index])
        segments_layer = "Segments"
        arcpy.MakeFeatureLayer_management(stream_seg, segments_layer)
        # Select the segments flagged for deletion...
        for to_delete in to_delete_list:
            arcpy.SelectLayerByAttribute_management(
                segments_layer, 'ADD_TO_SELECTION',
                'FID = {}'.format(to_delete))
        # ...then invert the selection to save the reaches we want to keep
        arcpy.SelectLayerByAttribute_management(segments_layer,
                                                'SWITCH_SELECTION')
        reach_save_location = os.path.join(output_folder, "Field_Reaches.shp")
        arcpy.CopyFeatures_management(segments_layer, reach_save_location)
        to_merge_reaches.append(reach_save_location)
        # Save the points we want to keep
        points_layer = "Points"
        arcpy.MakeFeatureLayer_management(points, points_layer)
        for to_keep in keep_points_list:
            arcpy.SelectLayerByAttribute_management(
                points_layer, 'ADD_TO_SELECTION',
                'SiteID = {}'.format(to_keep))
        point_save_location = os.path.join(output_folder, "Field_Points.shp")
        arcpy.CopyFeatures_management(points_layer, point_save_location)
        to_merge_points.append(point_save_location)
        num_points = int(arcpy.GetCount_management(point_save_location)[0])
        num_reaches = int(arcpy.GetCount_management(reach_save_location)[0])
        # Sanity check: every field reach should have exactly two field
        # points (one TOR, one BOR). Compare with multiplication rather
        # than `num_points / 2`, which under Python 2 floor division lets
        # an odd point count (e.g. 5 points, 2 reaches) pass silently.
        if num_points != (num_reaches * 2):
            arcpy.AddMessage(
                "\t This watershed does not have one field reach per two field points!"
            )
    arcpy.AddMessage("Saving ProjectWide...")
    projectwide_folder = os.path.join(root_folder, "00_ProjectWide",
                                      "Intermediates", "Reach_Editing",
                                      "Outputs")
    arcpy.Merge_management(
        to_merge_points,
        os.path.join(projectwide_folder, "Field_Points.shp"))
    arcpy.Merge_management(
        to_merge_reaches,
        os.path.join(projectwide_folder, "Field_Reaches.shp"))
    delete_temps(temps_to_delete)
    finish()
def main():
    """Prepare cleaned field reaches and points as extraction inputs.

    For every watershed: copies the edited field reaches, computes each
    reach's length in meters ("FldRchLen"), reduces the points to BOR
    points only, cross-joins point and reach attributes, strips all but
    the whitelisted fields, and saves the cleaned shapefiles. The
    per-watershed results are then merged into the 00_ProjectWide
    Extraction inputs.

    Relies on module-level helpers (get_watershed_folders, delete_old,
    delete_temps, finish) and the module-level root_folder.
    """
    # Initialize variables and file locations
    arcpy.env.overwriteOutput = True
    watershed_folders = get_watershed_folders(root_folder)
    projectwide_output = os.path.join(root_folder, "00_ProjectWide",
                                      "Intermediates", "Extraction", "Inputs")
    temps_to_delete = []
    # Name of the computed reach-length field (meters)
    field_to_add = "FldRchLen"
    # Fields retained in the joined outputs. FldRchLen is included here
    # once, instead of being re-appended inside the watershed loop (the
    # original appended it every iteration, growing the list with
    # duplicates).
    keep_fields = [
        "Shape", "FID", "SiteID", "RchID", "POINT_X", "POINT_Y", "SnapDist",
        field_to_add
    ]
    to_merge_reaches = []
    to_merge_points = []
    delete_old(
        os.path.join(root_folder, "00_ProjectWide", "Inputs", "Parameters"))
    delete_old(projectwide_output)
    for watershed in watershed_folders:
        arcpy.AddMessage("Starting {}...".format(watershed))
        # Get necessary files
        output_folder = os.path.join(watershed, "Intermediates", "Extraction",
                                     "Inputs")
        in_reaches = os.path.join(watershed, "Intermediates", "Reach_Editing",
                                  "Outputs", "Field_Reaches.shp")
        in_points = os.path.join(watershed, "Intermediates", "Reach_Editing",
                                 "Outputs", "Field_Points.shp")
        points_temp = os.path.join(output_folder, "p_temp.shp")
        reaches_temp = os.path.join(output_folder, "r_temp.shp")
        reaches_joined = os.path.join(output_folder, "r_join.shp")
        points_joined = os.path.join(output_folder, "p_join.shp")
        temps_to_delete.extend(
            [points_temp, reaches_temp, reaches_joined, points_joined])
        points_final = os.path.join(output_folder, "Field_Points_Clean.shp")
        reaches_final = os.path.join(output_folder,
                                     "Field_Reaches_Clean.shp")
        # Add field for the length of the field reach
        arcpy.Copy_management(in_reaches, reaches_temp)
        field_names = [f.name for f in arcpy.ListFields(reaches_temp)]
        if field_to_add not in field_names:
            arcpy.AddField_management(reaches_temp, field_to_add, "DOUBLE")
        arcpy.CalculateField_management(reaches_temp, field_to_add,
                                        "!shape.length@meters!",
                                        "PYTHON_9.3", "")
        # Reduce points to only BOR points
        points_layer = "Points"
        arcpy.MakeFeatureLayer_management(in_points, points_layer)
        arcpy.SelectLayerByAttribute_management(points_layer, 'NEW_SELECTION',
                                                "\"TOR_BOR\" = 'BOR'")
        arcpy.CopyFeatures_management(points_layer, points_temp)
        # Add all point data to the reaches
        arcpy.SpatialJoin_analysis(reaches_temp, points_temp, reaches_joined,
                                   "JOIN_ONE_TO_ONE")
        # Add all reach data to the points
        arcpy.SpatialJoin_analysis(points_temp, reaches_temp, points_joined,
                                   "JOIN_ONE_TO_ONE")
        # Only keep fields we need
        for shape in (points_joined, reaches_joined):
            # Remove every field that is not in the keep list
            field_names = [f.name for f in arcpy.ListFields(shape)]
            delete_fields = [
                field for field in field_names if field not in keep_fields
            ]
            arcpy.DeleteField_management(shape, delete_fields)
        # Save the points and reaches
        to_merge_points.append(
            arcpy.CopyFeatures_management(points_joined, points_final))
        to_merge_reaches.append(
            arcpy.CopyFeatures_management(reaches_joined, reaches_final))
    arcpy.AddMessage("Saving Projectwide...")
    # Explicit .shp extensions, consistent with the per-watershed outputs
    # (the original omitted them here only).
    arcpy.Merge_management(
        to_merge_points,
        os.path.join(projectwide_output, "Field_Points_Clean.shp"))
    arcpy.Merge_management(
        to_merge_reaches,
        os.path.join(projectwide_output, "Field_Reaches_Clean.shp"))
    delete_temps(temps_to_delete)
    finish()
def main():
    """Snap TOR and BOR field points to the stream network per watershed.

    For each watershed, copies the raw TOR/BOR points, then repeatedly
    snaps them to the stream network edge, widening the snap distance by
    custom_increment each pass until every point intersects the network
    (or, when use_threshold is set, until snap_dist reaches
    threshold_range). Each point's winning snap distance is recorded in a
    new "SnapDist" field; points never snapped keep the sentinel 999 and
    are saved separately as "Unsnapped". Finally the per-watershed
    snapped/unsnapped outputs are merged into the 00_ProjectWide folders
    and the unsnapped sets are copied into Unsnapped_Fixed as To_Fix
    files for manual editing.

    Relies on module-level helpers (get_watershed_folders, delete_old,
    finish) and module-level root_folder, custom_increment,
    use_threshold, threshold_range.
    """
    # Initialize Variables
    arcpy.env.overwriteOutput = True
    # Per-watershed outputs accumulated for the projectwide merges below
    saved_tor_points_snapped = []
    saved_bor_points_snapped = []
    saved_tor_points_unsnapped = []
    saved_bor_points_unsnapped = []
    watershed_folders = get_watershed_folders(root_folder)
    # Delete old content from this tool being re run.
    delete_old(
        os.path.join(root_folder, "00_ProjectWide", "Intermediates", "Points",
                     "Snapped"))
    delete_old(
        os.path.join(root_folder, "00_ProjectWide", "Intermediates", "Points",
                     "Unsnapped"))
    # This loops for every watershed
    for watershed_folder in watershed_folders:
        arcpy.AddMessage("Starting {}...".format(watershed_folder))
        # Get all file names
        output_folder = os.path.join(watershed_folder, "Intermediates",
                                     "Points")
        network = os.path.join(watershed_folder, "Inputs", "Stream_Network",
                               "Stream_Network.shp")
        delete_old(os.path.join(output_folder, "Snapped"))
        delete_old(os.path.join(output_folder, "Unsnapped"))
        bor_points_old = os.path.join(watershed_folder, "Inputs", "Points",
                                      "BOR_Points.shp")
        tor_points_old = os.path.join(watershed_folder, "Inputs", "Points",
                                      "TOR_Points.shp")
        bor_points_new_temp = os.path.join(output_folder,
                                           "BOR_Points_Temp.shp")
        tor_points_new_temp = os.path.join(output_folder,
                                           "TOR_Points_Temp.shp")
        bor_points_new_snapped = os.path.join(output_folder, "Snapped",
                                              "BOR_Points_Snapped.shp")
        tor_points_new_snapped = os.path.join(output_folder, "Snapped",
                                              "TOR_Points_Snapped.shp")
        saved_bor_points_snapped.append(bor_points_new_snapped)
        saved_tor_points_snapped.append(tor_points_new_snapped)
        bor_points_new_unsnapped = os.path.join(output_folder, "Unsnapped",
                                                "BOR_Points_Unsnapped.shp")
        tor_points_new_unsnapped = os.path.join(output_folder, "Unsnapped",
                                                "TOR_Points_Unsnapped.shp")
        saved_bor_points_unsnapped.append(bor_points_new_unsnapped)
        saved_tor_points_unsnapped.append(tor_points_new_unsnapped)
        # Snap_edit modifies in place, so work on copies of the inputs
        arcpy.CopyFeatures_management(bor_points_old, bor_points_new_temp)
        arcpy.CopyFeatures_management(tor_points_old, tor_points_new_temp)
        points_list = [tor_points_new_temp, bor_points_new_temp]
        # This loops once for TOR and once for BOR, snaps all points
        for counter, points in enumerate(points_list):
            if counter == 0:
                label = "TOR"
            else:
                label = "BOR"
            snap_dist = 0
            all_snapped = False
            total_points = arcpy.GetCount_management(points)
            base_list = []
            # base_list holds each point's RchID in cursor order;
            # snap_value_list is kept parallel to it, initialized to the
            # 999 sentinel meaning "not snapped yet".
            # TODO make more general
            for row in arcpy.da.SearchCursor(points, "RchID"):
                base_list.append(row[0])
            snap_value_list = [999] * len(base_list)
            arcpy.AddField_management(points, "SnapDist", "SHORT")
            # This loops until all points are snapped to the network,
            # incrementing the snap distance by 10 each time.
            # NOTE(review): if use_threshold is False and some point can
            # never reach the network, this loop has no other exit —
            # presumably the increment eventually covers all points;
            # confirm.
            while all_snapped is False:
                # Increment snap distance by an increment
                snap_dist += custom_increment
                snap_name = str(snap_dist) + " Meters"
                arcpy.AddMessage("\t Snapping {} {}".format(label, snap_name))
                # Snap the points
                arcpy.Snap_edit(points, [[network, "EDGE", snap_name]])
                temp = os.path.join(watershed_folder, "temporary.shp")
                # Create an intersect of the points and the network
                arcpy.Intersect_analysis([points, network], temp)
                current_snapped = arcpy.GetCount_management(temp)
                # Update each snap distance value for points that were just
                # snapped (only the first pass that snaps a point records
                # its distance, since 999 is overwritten exactly once)
                # TODO make more general
                for row in arcpy.da.SearchCursor(temp, "RchID"):
                    if snap_value_list[base_list.index(row[0])] == 999:
                        snap_value_list[base_list.index(row[0])] = snap_dist
                # Checks to see if every point has been snapped yet
                if (str(current_snapped) == str(total_points)) or \
                        (use_threshold is True and snap_dist >= threshold_range):
                    # All points have been snapped, or are beyond the given
                    # threshold
                    all_snapped = True
                # Delete temporary file
                arcpy.Delete_management(temp)
            # Add XY data to each point
            arcpy.AddXY_management(points)
            # Populate the snap distance field (relies on the UpdateCursor
            # visiting rows in the same order as the SearchCursor above)
            with arcpy.da.UpdateCursor(points, "SnapDist") as cursor:
                for count, row in enumerate(cursor):
                    row[0] = snap_value_list[count]
                    cursor.updateRow(row)
            # Create a layer to select from
            points_layer = "Points"
            arcpy.MakeFeatureLayer_management(points, points_layer)
            # Save snapped and unsnapped points; SnapDist >= 999 means the
            # sentinel survived, i.e. the point was never snapped
            if label == "TOR":
                arcpy.SelectLayerByAttribute_management(
                    points_layer, 'NEW_SELECTION', 'SnapDist >= 999')
                arcpy.CopyFeatures_management(points_layer,
                                              tor_points_new_unsnapped)
                arcpy.SelectLayerByAttribute_management(
                    points_layer, 'SWITCH_SELECTION')
                arcpy.CopyFeatures_management(points_layer,
                                              tor_points_new_snapped)
                arcpy.SelectLayerByAttribute_management(
                    points_layer, 'CLEAR_SELECTION')
            if label == "BOR":
                arcpy.SelectLayerByAttribute_management(
                    points_layer, 'NEW_SELECTION', 'SnapDist >= 999')
                arcpy.CopyFeatures_management(points_layer,
                                              bor_points_new_unsnapped)
                arcpy.SelectLayerByAttribute_management(
                    points_layer, 'SWITCH_SELECTION')
                arcpy.CopyFeatures_management(points_layer,
                                              bor_points_new_snapped)
                arcpy.SelectLayerByAttribute_management(
                    points_layer, 'CLEAR_SELECTION')
        # Delete temporary files
        arcpy.Delete_management(bor_points_new_temp)
        arcpy.Delete_management(tor_points_new_temp)
    arcpy.AddMessage("Saving ProjectWide Files...")
    output_folder = os.path.join(root_folder, "00_ProjectWide",
                                 "Intermediates", "Points")
    bor_points_new_snapped = os.path.join(output_folder, "Snapped",
                                          "BOR_Points_Snapped.shp")
    tor_points_new_snapped = os.path.join(output_folder, "Snapped",
                                          "TOR_Points_Snapped.shp")
    bor_points_new_unsnapped = os.path.join(output_folder, "Unsnapped",
                                            "BOR_Points_Unsnapped.shp")
    tor_points_new_unsnapped = os.path.join(output_folder, "Unsnapped",
                                            "TOR_Points_Unsnapped.shp")
    arcpy.Merge_management(saved_bor_points_snapped, bor_points_new_snapped)
    arcpy.Merge_management(saved_tor_points_snapped, tor_points_new_snapped)
    arcpy.Merge_management(saved_bor_points_unsnapped,
                           bor_points_new_unsnapped)
    arcpy.Merge_management(saved_tor_points_unsnapped,
                           tor_points_new_unsnapped)
    # Seed the manual-fix copies of whatever could not be snapped
    arcpy.Copy_management(
        bor_points_new_unsnapped,
        os.path.join(output_folder, "Unsnapped_Fixed", "To_Fix_BOR.shp"))
    arcpy.Copy_management(
        tor_points_new_unsnapped,
        os.path.join(output_folder, "Unsnapped_Fixed", "To_Fix_TOR.shp"))
    finish()