Example #1
def minimum_bounding_geometry(the_scenario, logger):
    logger.info("start: minimum_bounding_geometry")
    arcpy.env.workspace = the_scenario.main_gdb

    # Clean up any leftover layers from a previous run
    if arcpy.Exists("road_lyr"):
        arcpy.Delete_management("road_lyr")
    if arcpy.Exists("rail_lyr"):
        arcpy.Delete_management("rail_lyr")
    if arcpy.Exists("water_lyr"):
        arcpy.Delete_management("water_lyr")
    if arcpy.Exists("pipeline_prod_trf_rts_lyr"):
        arcpy.Delete_management("pipeline_prod_trf_rts_lyr")
    if arcpy.Exists("pipeline_crude_trf_rts_lyr"):
        arcpy.Delete_management("pipeline_crude_trf_rts_lyr")
    if arcpy.Exists("Locations_MBG"):
        arcpy.Delete_management("Locations_MBG")
    if arcpy.Exists("Locations_MBG_Buffered"):
        arcpy.Delete_management("Locations_MBG_Buffered")

    # Determine the minimum bounding geometry of the scenario
    arcpy.MinimumBoundingGeometry_management("Locations", "Locations_MBG",
                                             "CONVEX_HULL")

    # Buffer the minimum bounding geometry of the scenario
    arcpy.Buffer_analysis("Locations_MBG", "Locations_MBG_Buffered",
                          "100 Miles", "FULL", "ROUND", "NONE", "", "GEODESIC")

    # Select the roads within the buffer
    # -----------------------------------
    arcpy.MakeFeatureLayer_management("road", "road_lyr")
    arcpy.SelectLayerByLocation_management("road_lyr", "INTERSECT",
                                           "Locations_MBG_Buffered")

    result = arcpy.GetCount_management("road")
    count_all_roads = float(result.getOutput(0))

    result = arcpy.GetCount_management("road_lyr")
    count_roads_subset = float(result.getOutput(0))

    # Guard against division by zero when the scenario has no road features
    if count_all_roads > 0:
        roads_percentage = count_roads_subset / count_all_roads
    else:
        roads_percentage = 0

    # Only subset if the subset will result in substantial reduction of the road network size
    if roads_percentage < 0.75:
        # Switch selection to identify what's outside the buffer
        arcpy.SelectLayerByAttribute_management("road_lyr", "SWITCH_SELECTION")

        # Keep all FCLASS 1 roadways (interstate highways) regardless of the buffer
        arcpy.SelectLayerByAttribute_management("road_lyr",
                                                "REMOVE_FROM_SELECTION",
                                                "FCLASS = 1")

        # Delete the selected features (those outside the buffer); the update
        # cursor honors the layer's active selection
        with arcpy.da.UpdateCursor('road_lyr', ['OBJECTID']) as ucursor:
            for _ in ucursor:
                ucursor.deleteRow()

        arcpy.Delete_management("road_lyr")

        # # Select the rail within the buffer
        # # ---------------------------------

    arcpy.Delete_management("Locations_MBG")
    arcpy.Delete_management("Locations_MBG_Buffered")

    # finally, compact the geodatabase so the MBG has an effect on runtime.
    arcpy.Compact_management(the_scenario.main_gdb)
    logger.debug("finish: minimum_bounding_geometry")
Example #2
    while row:
        row.ID = row.OBJECTID
        if units == "Feet":
            row.LengthFt = str(round(row.SHAPE_Length, 1))
        else:
            row.LengthFt = str(round(row.SHAPE_Length * 3.280839896, 1))
        rows.updateRow(row)
        row = rows.next()

    del row
    del rows
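
    # Hedged equivalent with CalculateField instead of the row-by-row legacy
    # cursor above (the feature class variable name "outFC" is an assumption):
    # arcpy.CalculateField_management(outFC, "ID", "!OBJECTID!", "PYTHON_9.3")
    # expr = ("str(round(!SHAPE_Length!, 1))" if units == "Feet"
    #         else "str(round(!SHAPE_Length! * 3.280839896, 1))")
    # arcpy.CalculateField_management(outFC, "LengthFt", expr, "PYTHON_9.3")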

    # ------------------------------------------------------------------------ Select Contours by dam Location & copy
    AddMsgAndPrint("\nSelecting intersected contours...", 0)

    arcpy.MakeFeatureLayer_management(InputContours, contourLyr)
    arcpy.SelectLayerByLocation_management(contourLyr, "INTERSECT", damTemp,
                                           "", "NEW_SELECTION")
    arcpy.CopyFeatures_management(contourLyr, contourMask)
    arcpy.SelectLayerByAttribute_management(contourLyr, "CLEAR_SELECTION")

    # ------------------------------------------------------------------------ Buffer and erase to break contours at dam and select closed contours
    # (This part requires an ArcInfo license -- Erase and left/right buffers; the right buffer comes later)
    arcpy.Buffer_analysis(damTemp, buffer1, "1 Feet", "FULL", "FLAT", "NONE",
                          "")
    arcpy.Erase_analysis(contourMask, buffer1, contourErase, "")

    arcpy.Buffer_analysis(damTemp, buffer2, "1.5 Feet", "LEFT", "FLAT", "NONE",
                          "")
    arcpy.Buffer_analysis(damTemp, buffer3, "3 Feet", "LEFT", "FLAT", "NONE",
                          "")
Example #3
import arcpy
import os
import config

sde_connection = config.sde_connection
#default_gdb_connection = r"C:\Users\malika\Documents\ArcGIS\Default.gdb"
#arcpy.env.workspace = default_gdb_connection
arcpy.env.overwriteOutput = True

#INPUTS - Address feature class and buffer distance
addr = arcpy.GetParameterAsText(0) 
distance = arcpy.GetParameterAsText(1)

#CONVERT TO LAYER
addressFC = os.path.join(sde_connection, config.address)
arcpy.MakeFeatureLayer_management(addressFC, "addr_lyr")

titleFC = os.path.join(sde_connection, config.title_parcel)
arcpy.MakeFeatureLayer_management(titleFC, "title_lyr")

spatial_reference = arcpy.Describe(titleFC).spatialReference

#TABLE VIEW
titles_table = os.path.join(sde_connection, config.owner_table)
arcpy.MakeTableView_management(titles_table, "table_titles")

#SELECT BY LOCATION
arcpy.SelectLayerByLocation_management("title_lyr", "WITHIN_A_DISTANCE", addr, distance, "NEW_SELECTION")
#arcpy.CopyFeatures_management('title_lyr', "in_memory/selected_title")

#TABLE JOIN TITLE PARCELS
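# Hedged continuation (the snippet ends here): join the owner table to the
# selected parcels; the key field name "TITLE_NO" is an assumption.
# arcpy.AddJoin_management("title_lyr", "TITLE_NO", "table_titles", "TITLE_NO", "KEEP_COMMON")
# arcpy.CopyFeatures_management("title_lyr", "in_memory/selected_title_owners")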
Example #4
if arcpy.Exists(intermediates[0]):
    print "ALREADY COMPLETED, SKIPPING: copy of the master signs layer for editing"
else:
    print "Making a copy of the master signs layer for editing"
    arcpy.Copy_management(in_data=signsMaster_loc, out_data=intermediates[0])

if arcpy.Exists(intermediates[1]):
    print "ALREADY COMPLETED, SKIPPING: removing duplicates from sign layer"
else:
    print "Exporting a sorted version of the signs layer"
    arcpy.Sort_management(in_dataset=intermediates[0],
                          out_dataset=intermediates[1],
                          sort_field=[['UNITID_1', 'ASCENDING']])
    print "Deleting duplicates"
    arcpy.MakeFeatureLayer_management(intermediates[1], "points_del")
    cursor_points = arcpy.da.UpdateCursor("points_del", ['UNITID_1'])
    UnIDs = [0, 0]
    for point_row in cursor_points:
        print UnIDs
        UnIDs[1] = point_row[0]
        if UnIDs[1] == UnIDs[0]:
            cursor_points.deleteRow()
            print "deletion"
        else:
            print "non deletion"
        UnIDs[0] = point_row[0]
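
    # Hedged alternative: a seen-set makes the dedup independent of sort order
    # (equivalent here because the layer was just sorted on UNITID_1):
    # seen = set()
    # with arcpy.da.UpdateCursor("points_del", ['UNITID_1']) as cur:
    #     for point_row in cur:
    #         if point_row[0] in seen:
    #             cur.deleteRow()
    #         else:
    #             seen.add(point_row[0])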

if arcpy.Exists(intermediates[2]):
    print "ALREADY COMPLETED, SKIPPING: Buffering the signs"
else:
Example #5
# Retrieve all values from field in attribute table to list
field_values = [row[0] for row in arcpy.da.SearchCursor(shp, [field])]
# Long form this looks like
#field_values = []
#with arcpy.da.SearchCursor(shp, [field]) as cursor:
#    for row in cursor:
#        field_values.append(row[0])

# Coerce that list to a set (all unique) then back to a list (order changes)
unique_values = list(set(field_values))

# Generate selection layer from attribute query to copy
# First make the shp a layer
lyr = "parcels_lyr"
arcpy.MakeFeatureLayer_management(shp, lyr)
# Now make the SQL query string; there are many ways to do this
drop_values = [
    'COUNTIES (OTHER THAN PUBLIC SCHOOLS, COLLEGES, HOSPITALS) INCLUDING NON-MUNICIPAL GOVERNMENT',
    'FEDERAL, OTHER THAN MILITARY, FORESTS, PARKS, RECREATIONAL AREAS, HOSPITALS, COLLEGES',
    'MILITARY',
    'PARCELS WITH NO VALUES',
    'PUBLIC COUNTY SCHOOLS - INCLUDING ALL PROPERTY OF BOARD OF PUBLIC INSTRUCTION',
    'RIVERS AND LAKES, SUBMERGED LANDS',
    'STATE, OTHER THAN MILITARY, FORESTS, PARKS, RECREATIONAL AREAS, COLLEGES, HOSPITALS',
    'UTILITY, GAS AND ELECTRICITY, TELEPHONE AND TELEGRAPH, LOCALLY ASSESSED RAILROADS, WATER AND SEWER SERVICE, PIPELINES, CANALS, RADIO/TELEVISION COMMUNICATION',
    'RIGHT-OF-WAY, STREETS, ROADS, IRRIGATION CHANNEL, DITCH, ETC.',
    'MUNICIPAL, OTHER THAN PARKS, RECREATIONAL AREAS, COLLEGES, HOSPITALS',
    'AIRPORTS (PRIVATE OR COMMERCIAL), BUS TERMINALS, MARINE TERMINALS, PIERS, MARINAS',
    'SEWAGE DISPOSAL, SOLID WASTE, BORROW PITS, DRAINAGE RESERVOIRS, WASTE LAND, MARSH, SAND DUNES, SWAMPS',
]
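
# Hedged continuation: assemble a NOT IN query from drop_values and select
# the parcels to keep. AddFieldDelimiters handles shapefile vs. gdb quoting;
# variable names below are illustrative.
field_delim = arcpy.AddFieldDelimiters(shp, field)
keep_query = "{0} NOT IN ({1})".format(
    field_delim,
    ", ".join("'{0}'".format(v) for v in drop_values))
arcpy.SelectLayerByAttribute_management(lyr, "NEW_SELECTION", keep_query)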
Example #6
    def execute(self, parameters, messages):
        """The source code of the tool."""

        try:

            # Clear the in_memory workspace, just in case
            deleteInMemory()

            # Get the analysis ID
            analysis_id = (parameters[34].valueAsText.split("[")[1][:5])+"_"+\
                          (parameters[33].valueAsText.split("[")[1][:3])+"_"+\
                          (parameters[32].valueAsText)

            # Get the alternative selection
            alternative = parameters[34].valueAsText.split("[")[1][:5]

            # Make a directory
            parent_folder_path = os.path.join(
                os.path.dirname(parameters[1].valueAsText),
                os.path.basename(parameters[1].valueAsText))
            child_folder_path = parent_folder_path + "\\" + analysis_id

            # JIC
            if not os.path.exists(parent_folder_path):
                os.mkdir(parent_folder_path)

            if not os.path.exists(child_folder_path):
                os.mkdir(child_folder_path)

            date_time_stamp = re.sub('[^0-9]', '',
                                     str(datetime.datetime.now())[5:16])
            #filename = os.path.basename(__file__)
            analysis_id_time_stamp = analysis_id + "_" + date_time_stamp

            # Create the logger
            report_path = child_folder_path + "\\" + analysis_id_time_stamp + "_Report.txt"
            logfile_path = child_folder_path + "\\" + analysis_id_time_stamp + "_Logfile.txt"
            logger = pyt_log(report_path, logfile_path)

            ###
            #logger.log_active = False # Uncomment to disable logfile
            ###

            # Start logging
            logger.log_all("Surface Use Analysis " +
                           str(datetime.datetime.now()))
            logger.log_report("_" * 120 + "\n")
            logger.log_all("Running environment: Python - {}\n".format(
                sys.version))
            logger.log_all("User: "******"\n")
            logger.log_all("Analysis Type: " + analysis_id + "\n")
            logger.log_all("Analysis Area:\n")
            logger.log_all('\t' + parameters[0].valueAsText + '\n')
            logger.log_all("Output Location:\n")
            logger.log_all('\t' + parameters[1].valueAsText + '\n')

            #######################################################################################################################
            ##
            ## MAIN PROGRAM
            ##
            #######################################################################################################################

            # Make a geodatabase
            database_name = analysis_id_time_stamp + '.gdb'
            database_path = child_folder_path
            arcpy.CreateFileGDB_management(database_path, database_name,
                                           "10.0")
            output_path = database_path + "\\" + database_name
            logger.log_all('Created geodatabase at: \n')
            logger.log_all('\t' + output_path + "\n")

            # Secure a copy of the input analysis area
            arcpy.MakeFeatureLayer_management(parameters[0].value,
                                              "in_memory\\_")

            # Dissolve everything to prevent overlapping input polygons
            logger.console('Dissolving input polygon')
            arcpy.Dissolve_management("in_memory\\_", "in_memory\\__")
            analysis_area = output_path + "\\" + analysis_id + "_Analysis_Area"
            arcpy.CopyFeatures_management("in_memory\\__", analysis_area)

            # Set the workspace to the output database
            arcpy.env.workspace = output_path
            logger.logfile("Env workspace:", output_path)

            # Identify spatial reference of analysis area
            spatial_ref = arcpy.Describe(analysis_area).spatialReference
            logger.logfile("Spatial reference:", str(spatial_ref))

            # The main data structure - key = parameter ID, values = ['input parameter paths', 'category', 'Code']
            input_params = {
                '02air_quality_climate':
                [parameters[2].valueAsText, 'Resources', 'AIR_QUAL'],
                '03aquatic_wildlife':
                [parameters[3].valueAsText, 'Resources', 'AQUAT_WL'],
                '04cultural_resources':
                [parameters[4].valueAsText, 'Resources', 'CULTURAL'],
                '05fire_fuel':
                [parameters[5].valueAsText, 'Resources', 'FIRE_FUELS'],
                '06geology': [parameters[6].valueAsText, 'Resources', 'GEO'],
                '07wilderness_characteristics':
                [parameters[7].valueAsText, 'Resources', 'LWC'],
                '08paleo_resources':
                [parameters[8].valueAsText, 'Resources', 'PALEO'],
                '09soil_resources':
                [parameters[9].valueAsText, 'Resources', 'SOIL'],
                '10special_status_species':
                [parameters[10].valueAsText, 'Resources', 'SS_SPECIES'],
                '11terrestrial_wildlife':
                [parameters[11].valueAsText, 'Resources', 'TERR_WL'],
                '12tribal_concerns':
                [parameters[12].valueAsText, 'Resources', 'TRIBAL'],
                '13vegetation':
                [parameters[13].valueAsText, 'Resources', 'VEG'],
                '14visual_resources':
                [parameters[14].valueAsText, 'Resources', 'VISUAL'],
                '15water_resources': [
                    parameters[15].valueAsText, 'Resources', 'WATER'
                ],
                '16wetlands_riparian': [
                    parameters[16].valueAsText, 'Resources', 'WETLANDS'
                ],
                '17forestry': [
                    parameters[17].valueAsText, 'Resource_Uses', 'FORESTRY'
                ],
                '18livestock_grazing': [
                    parameters[18].valueAsText, 'Resource_Uses', 'GRAZING'
                ],
                '19lands_realty': [
                    parameters[19].valueAsText, 'Resource_Uses', 'LANDS'
                ],
                '20minerals': [
                    parameters[20].valueAsText, 'Resource_Uses', 'MINERALS'
                ],
                '21recreation': [
                    parameters[21].valueAsText, 'Resource_Uses', 'REC'
                ],
                '22renewable_energy': [
                    parameters[22].valueAsText, 'Resource_Uses', 'RENEWABLE'
                ],
                '23south_park_MLP': [
                    parameters[23].valueAsText, 'Resource_Uses', 'SPMLP'
                ],
                '24travel_transportation': [
                    parameters[24].valueAsText, 'Resource_Uses', 'TRAVEL'
                ],
                '25ACECs': [
                    parameters[25].valueAsText, 'Special_Designations', 'ACEC'
                ],
                '26BCAs': [
                    parameters[26].valueAsText, 'Special_Designations', 'BCA'
                ],
                '27scenic_byways':
                [parameters[27].valueAsText, 'Special_Designations', 'BYWAYS'],
                '28wilderness_areas_WSAs': [
                    parameters[28].valueAsText, 'Special_Designations', 'WSA'
                ],
                '29wild_scenic_rivers': [
                    parameters[29].valueAsText, 'Special_Designations', 'WSR'
                ],
                '30aml_hazmat':
                [parameters[30].valueAsText, 'Social_Economics', 'AML_HAZMAT'],
                '31social_economic_values': [
                    parameters[31].valueAsText, 'Social_Economics', 'SOC_ECON'
                ]
            }

            # Create a sorted list of input parameters with actual values
            sorted_inputs = sorted([
                item for item in input_params.items()
                if item[1][0] is not None
            ])
            logger.logfile('Raw inputs:', sorted(input_params.items()))
            logger.logfile('Valid inputs:', sorted_inputs)

            # Verify that there were some inputs
            if len(sorted_inputs) == 0:
                logger.log_all('No Inputs')
                logger.log_all("There are no valid inputs - system exit")
                sys.exit()

            # Get a list of the categories represented in the input data
            input_categories = set([item[1][1] for item in sorted_inputs])
            logger.logfile('Input categories:', input_categories)

            # Create feature datasets: 'Inputs' for copy of input data, 'Results' for outputs
            arcpy.CreateFeatureDataset_management(output_path, "Inputs",
                                                  spatial_ref)
            arcpy.CreateFeatureDataset_management(output_path, "Results",
                                                  spatial_ref)

            # Union each input's feature classes, then dissolve the result - note this discards attribute data!
            def union_inputs(name, fc_list):
                union_output = output_path + "\\Inputs\\" + name
                arcpy.Union_analysis(fc_list, "in_memory\\dissolve")
                arcpy.Dissolve_management("in_memory\\dissolve", union_output)
                return

            # Iterate across sorted items and create union output
            logger.console(
                'Dissolving criteria unions        ---this will probably be slow---'
            )
            for id, data in sorted_inputs:
                union_inputs(id[2:], data[0])

            # Write inputs to report
            for category in input_categories:
                logger.report("\n" + category.upper() + ":\n")
                for ID, data_list in sorted_inputs:
                    paths = data_list[0].split(";")
                    if data_list[1] == category:
                        logger.report('\t' + ID[2:].upper().replace("_", " ") +
                                      " - " + data_list[2] + '\n')
                        for path_name in paths:
                            logger.report("\t\t" + path_name)
                        logger.report("\n")

            # Create a master list of all category fcs that were created for later intersection
            all_fcs_list = []
            for fc in arcpy.ListFeatureClasses(feature_dataset="Inputs"):
                all_fcs_list.append(fc)
            logger.logfile('all_fcs_list', all_fcs_list)

            # For each fc in all_fcs_list, clip it to the analysis area,
            # dissolve the clip, and output as "Restriction_" + feature name
            for fc in all_fcs_list:
                logger.logfile("FC", fc)
                output_fc_name = "Restriction_" + os.path.basename(fc)
                logger.logfile('output_fc_name', output_fc_name)
                output_fc_path = output_path + "\\Results\\" + output_fc_name
                #output_fc_path = output_path+"\\"+output_fc_name
                logger.logfile('output_fc_path', output_fc_path)
                arcpy.Clip_analysis(analysis_area, fc, "in_memory\\clip")
                # Dissolve the clips
                arcpy.Dissolve_management("in_memory\\clip", output_fc_path)

            # map the criteria to their categories
            fc_id_map = defaultdict(str)
            for key, value in sorted_inputs:
                fc_id_map[key[2:]] = value[2]

            # Collapse geometry union [will be slow with full input]
            logger.console(
                'Unioning all criteria inputs         ---this will probably be slow---'
            )
            output_aggregate_feature = output_path + "\\Aggregate_Results"

            # Add input analysis area to list of union and union it all
            all_fcs_list_copy = copy.deepcopy(all_fcs_list)
            ### Try actual path to Analysis_Area
            #all_fcs_list_copy.append(u'Analysis_Area')
            all_fcs_list_copy.append(analysis_area)
            logger.logfile("all_fcs_list_copy", all_fcs_list_copy)
            arcpy.Union_analysis(all_fcs_list_copy, "in_memory\\agg_union")

            # Clip the union and output it
            arcpy.Clip_analysis("in_memory\\agg_union", analysis_area,
                                "in_memory\\clip_")

            # Make sure everything is in single-part format for later analysis - JIC
            arcpy.MultipartToSinglepart_management("in_memory\\clip_",
                                                   output_aggregate_feature)

            # Create the matrix
            logger.console(
                'Creating matrix            ---this will probably be slow---')

            # Try to delete all the other fields - it's easier to ask forgiveness than permission (EAFP)
            erase_fields_lst = [
                field.name
                for field in arcpy.ListFields(output_aggregate_feature)
            ]
            for field in erase_fields_lst:
                try:
                    arcpy.DeleteField_management(output_aggregate_feature,
                                                 field)
                except:
                    logger.logfile(
                        "Delete field failed:", field
                    )  # Should minimally fail on OID, Shape, Shape_area, and Shape_length

            # Delete identical features within output_aggregate_feature to prevent double counting acres
            try:
                arcpy.DeleteIdentical_management(output_aggregate_feature,
                                                 ["SHAPE"])
                logger.logfile("Delete identical succeeded")
            except:
                logger.logfile("Delete identical failed"
                               )  # This will usually fail - it's ok

            # Calculate acres for output_aggregate_field and get acre field name
            acre_field = get_acres(output_aggregate_feature)
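
            # Hedged sketch of the external get_acres() helper used here (not
            # shown in this example): add/refresh an acres field and return
            # its name. The field name is an assumption.
            # def get_acres(fc):
            #     if "ACRES" not in [f.name for f in arcpy.ListFields(fc)]:
            #         arcpy.AddField_management(fc, "ACRES", "DOUBLE")
            #     arcpy.CalculateField_management(fc, "ACRES",
            #                                     "!shape.area@ACRES!",
            #                                     "PYTHON_9.3")
            #     return "ACRES"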

            # Create a defaultdict to store acreages - default dictionaries are awesome!
            acreage_counts = defaultdict(int)

            # Iterate across all_fcs_list and add field, select by location and calculate field with ID, remove null
            arcpy.MakeFeatureLayer_management(output_aggregate_feature,
                                              "in_memory\\mem_agg_layer")

            # Create a list to store all added field ids
            fc_field_list = []

            logger.console('Populating matrix')
            for fc in all_fcs_list:
                fc_ID = fc_id_map[str(fc)]

                # Copy the created fields for later use in fields summary
                fc_field_list.append(fc_ID)

                # Add field, select, and calculate
                arcpy.AddField_management("in_memory\\mem_agg_layer",
                                          fc_ID,
                                          "Text",
                                          field_length=20)
                arcpy.SelectLayerByLocation_management(
                    "in_memory\\mem_agg_layer",
                    "WITHIN",
                    fc,
                    selection_type="NEW_SELECTION")
                arcpy.CalculateField_management("in_memory\\mem_agg_layer",
                                                fc_ID, '"' + fc_ID + '"',
                                                "PYTHON_9.3")

                # Get the acres
                fc_acres = sum([
                    row[0] for row in arcpy.da.SearchCursor(
                        "in_memory\\mem_agg_layer", acre_field)
                ])

                # Add key=fc_id and value=acreage to sweet default dictionary
                acreage_counts[fc_ID] = round(fc_acres, 2)

                # Switch the selection
                arcpy.SelectLayerByAttribute_management(
                    "in_memory\\mem_agg_layer", "SWITCH_SELECTION")

                # Clean the table for readability - replace "Null" with ""
                arcpy.CalculateField_management("in_memory\\mem_agg_layer",
                                                fc_ID, '"' + "" + '"',
                                                "PYTHON_9.3")
                arcpy.SelectLayerByAttribute_management(
                    "in_memory\\mem_agg_layer", "CLEAR_SELECTION")

            # Write the markup feature to disc
            output_aggregate_feature_markup = output_path + "\\" + analysis_id + "_Restrictions_Markup"
            arcpy.CopyFeatures_management("in_memory\\mem_agg_layer",
                                          output_aggregate_feature_markup)

            # Create a summary field and get list of other fields
            arcpy.AddField_management(output_aggregate_feature_markup,
                                      "Summary",
                                      "Text",
                                      field_length=255)
            fc_field_list.append('Summary')
            num_of_fields = len(fc_field_list)
            with arcpy.da.UpdateCursor(output_aggregate_feature_markup,
                                       fc_field_list) as cur:
                for row in cur:
                    # Concatenate the per-criterion codes into the Summary field
                    row[num_of_fields - 1] = re.sub(
                        r'\s+', ' ',
                        (reduce(lambda x, y: x + " " + y,
                                [row[i]
                                 for i in range(num_of_fields - 1)]))).strip()
                    cur.updateRow(row)
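            # Hedged simpler equivalent of the reduce/re.sub concatenation:
            # row[-1] = " ".join(v for v in row[:-1] if v)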

            # Get the total analysis acreage
            arcpy.MakeFeatureLayer_management(output_aggregate_feature_markup,
                                              "in_memory\\_markup")
            total_analysis_acres = sum([
                row[0] for row in arcpy.da.SearchCursor(
                    "in_memory\\_markup", acre_field)
            ])
            logger.logfile("Total analysis acres", total_analysis_acres)

            # Get the total marked-up acreage
            logger.console('Creating markup output')
            arcpy.SelectLayerByAttribute_management("in_memory\\_markup",
                                                    "NEW_SELECTION",
                                                    """ "Summary" <> '' """)
            total_markup_acres = sum([
                row[0] for row in arcpy.da.SearchCursor(
                    "in_memory\\_markup", acre_field)
            ])
            logger.logfile("Total markup acres", total_markup_acres)

            # Delete the lingering unmarked output - comment out if you want to keep original with original fields
            arcpy.Delete_management(output_aggregate_feature)

            # Partition datasets - Alternative D
            logger.logfile("alternative", alternative)
            if alternative == "ALT_D":

                # Partition the data sets by ecoregion and write outputs to csv
                logger.console('Partitioning outputs by ecoregions')
                ecoregions = r'T:\CO\GIS\giswork\rgfo\projects\management_plans\ECRMP\Draft_RMP_EIS\1_Analysis\ECRMP_Outputs\boundaries\boundaries.gdb\ECRMP_HumanEcoregions_AltD_20160602'

                # Create a default dict to hold the values
                ecoregion_markup_acres = defaultdict(int)

                # Get a list of ecoregions
                ecoregion_field = "Community_Landscape"
                ecoregion_list = [
                    str(row[0]) for row in arcpy.da.SearchCursor(
                        ecoregions, ecoregion_field)
                ]
                logger.logfile("Ecoregion_list", ecoregion_list)

                # these will be created by split
                ecoregion_out_names = [
                    output_path + "\\" + er for er in ecoregion_list
                ]
                # Rename to these:
                ecoregion_rename = [
                    output_path + "\\" + analysis_id + "__" + er
                    for er in ecoregion_list
                ]
                logger.logfile("Ecoregion_out_names", ecoregion_out_names)

                arcpy.Split_analysis(output_aggregate_feature_markup,
                                     ecoregions, ecoregion_field, output_path)

                # Rename the ecoregion split outputs
                for old_name, new_name in zip(ecoregion_out_names,
                                              ecoregion_rename):
                    arcpy.Rename_management(old_name, new_name)

                for ecoregion_fc in ecoregion_rename:
                    # Get the acres
                    acre_field = get_acres(ecoregion_fc)
                    arcpy.MakeFeatureLayer_management(ecoregion_fc,
                                                      "in_memory\\ecoregion")
                    arcpy.SelectLayerByAttribute_management(
                        "in_memory\\ecoregion", "NEW_SELECTION",
                        """ "Summary" <> '' """)
                    ecoregion_acres = sum([
                        row[0] for row in arcpy.da.SearchCursor(
                            "in_memory\\ecoregion", acre_field)
                    ])

                    # Add key=fc_id and value=acreage to sweet default dictionary
                    ecoregion_markup_acres[os.path.basename(
                        ecoregion_fc)] = round(ecoregion_acres, 2)

            # Write outputs acreages to csv
            logger.console('Creating csv')
            outCSV = child_folder_path + "\\" + analysis_id_time_stamp + '_Acreage.csv'
            with open(outCSV, 'wb') as csvfile:
                csvwriter = csv.writer(csvfile)
                csvwriter.writerow([
                    "Total Analysis Acres",
                    str(round(total_analysis_acres, 2))
                ])
                csvwriter.writerow(["", ""])
                csvwriter.writerow(["", ""])
                # Write the criteria data
                csvwriter.writerow([
                    'Criteria', analysis_id + "_Raw_Acres",
                    analysis_id + "_Rounded_Acres", "Raw_Percent"
                ])
                for fc_id, acres in sorted(acreage_counts.items()):
                    csvwriter.writerow([
                        fc_id, acres,
                        round(acres, -2),
                        ((acres / total_analysis_acres) * 100)
                    ])
                csvwriter.writerow(["", ""])
                csvwriter.writerow(["", ""])
                # Write the total data
                csvwriter.writerow([
                    "Total " + analysis_id + " Acres",
                    round(total_markup_acres, 2),
                    round(total_markup_acres, -2),
                    (total_markup_acres / total_analysis_acres) * 100
                ])

                if alternative == "ALT_D":
                    # Write the ecoregion data
                    csvwriter.writerow(["", ""])
                    csvwriter.writerow(["", ""])
                    csvwriter.writerow(
                        ['Ecoregion', "Raw_Acres", "Rounded_Acres"])
                    for ecoregion, acres in sorted(
                            ecoregion_markup_acres.items()):
                        csvwriter.writerow(
                            [ecoregion, acres,
                             round(acres, -2)])
                    csvwriter.writerow(["", ""])
                    csvwriter.writerow(["", ""])

            logger.log_all('\nSuccessful completion..')

#######################################################################################################################
##
## EXCEPTIONS
##
#######################################################################################################################

        except:
            try:
                logger.log_all(
                    '\n\nTOOL - USE RESTRICTIONS DID NOT SUCCESSFULLY COMPLETE'
                )
                logger.console('See logfile for details')
                logger.log_all('Exceptions:\n')
                logger.log_report('_' * 120 + '\n')
                logger.log_all(str(traceback.format_exc()))
            except:
                pass

#######################################################################################################################
##
## CLEAN-UP
##
#######################################################################################################################

        finally:
            end_time = datetime.datetime.now()
            try:
                logger.log_all("End Time: " + str(end_time))
                logger.log_all("Time Elapsed: %s" %
                               (str(end_time - start_time)))
                del logger
            except:
                pass
            deleteInMemory()


#######################################################################################################################
Example #7
            dType = desc.dataType.upper()
            path = desc.catalogPath
            bName = desc.baseName

            flds = [x.name for x in desc.fields]
            if not "MUKEY" in flds:
                arcpy.env.addOutputsToMap = True
                PrintMsg('\n \nReloading ' + jLayer + ' due to existing join')
                if dType == 'RASTERLAYER':
                    arcpy.mapping.RemoveLayer(dfs, refLyr)
                    arcpy.MakeRasterLayer_management(path, bName)
                    arcpy.management.AddJoin(bName, "MUKEY", jTbl, "MUKEY")
                    PrintMsg('\n \nAdded join to ' + jLayer)
                elif dType == 'FEATURELAYER':
                    arcpy.mapping.RemoveLayer(dfs, refLyr)
                    arcpy.MakeFeatureLayer_management(path, bName)
                    arcpy.management.AddJoin(bName, "MUKEY", jTbl, "MUKEY")
                    PrintMsg('\n \nAdded join to ' + jLayer)
            else:
                arcpy.management.AddJoin(jLayer, "MUKEY", jTbl, "MUKEY")
                PrintMsg('\n \nAdded join to ' + jLayer)

        except:
            PrintMsg('\n \nUnable to make join to ' + jLayer)


    if len(failMuaggatt) > 0:
        PrintMsg('\n \nThe following muaggatt requests either failed or collected no records:', 1)
        for f in failMuaggatt:
            PrintMsg(f)
#           Select MajorAttractions within 2640 feet
#           Print names of selected majorAttractions
#

try:
    # Variable assignments for GP tasks
    fc1 = "MajorAttractions"
    fc2 = "Railroads"
    fc1Lyr = "MjrAttractLyr"
    fc2Lyr = "RailLyr"
    fc3 = "ClipAttractions"
    fields = ["NAME", "ADDR", "CITYNM", "ZIP"]
    SQLExp = """ "STREET_NAM" LIKE '%TROLLEY' """

    # Create Feature Layers
    arcpy.MakeFeatureLayer_management(fc1, fc1Lyr)
    arcpy.MakeFeatureLayer_management(fc2, fc2Lyr, SQLExp)

    # Perform spatial selection
    arcpy.SelectLayerByLocation_management(fc1Lyr, "WITHIN_A_DISTANCE", fc2Lyr,
                                           "2640 feet", "NEW_SELECTION")

    # Loop through selected features
    print "MajorAttractions within 0.5 mile of Trollies\n"
    with arcpy.da.SearchCursor(fc1Lyr, fields) as cursor:
        for row in cursor:
            print "{0}\n{1}\n{2}, CA {3}\n".format(row[0], row[1], row[2],
                                                   row[3])

    arcpy.CopyFeatures_management(fc1Lyr, fc3)
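
# Hedged completion: the scraped snippet ends before its handler, and a bare
# "try:" needs at least one "except" clause to run.
except arcpy.ExecuteError:
    print arcpy.GetMessages(2)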
            # save the query results to feature class
            arcpy.FeatureClassToFeatureClass_conversion(fc_rooms[0], fds_path, 'OS_temp', condition)
            OSFeatureClass = fds_path + os.sep + fc_file

            # dissolve multiple features into single
            arcpy.Dissolve_management(fds_path + os.sep + 'OS_temp', fds_path + os.sep + 'OS_temp2')
            # dissolve boundaries to make a continuous polygon for navigable space
            arcpy.AggregatePolygons_cartography(fds_path + os.sep + 'OS_temp2', OSFeatureClass, aggregationDist,
                                                "0 SquareFeet", "0 SquareFeet", "NON_ORTHOGONAL", "#")
            #arcpy.Delete_management(OSFeatureClass)
            arcpy.Delete_management(fds_path + os.sep + 'OS_temp')
            arcpy.Delete_management(fds_path + os.sep + 'OS_temp2')

             ######### Processing landmarks #########
            condition = "[FLOOR] = " + str(floor_num)
            arcpy.MakeFeatureLayer_management(fc_doors[0], 'DR_lyr', condition)

            if (arcpy.Exists(OSFeatureClass)):
              arcpy.SelectLayerByLocation_management('DR_lyr', "WITHIN_A_DISTANCE", OSFeatureClass, '2 Feet',
                                                     "NEW_SELECTION")
            count = arcpy.GetCount_management('DR_lyr')
            print count
            if int(count.getOutput(0)) != 0:
              fc_file = fc_name + "_FLR" + str(floor_num) + "_DR"
              DRFeatureClass = fds_path + os.sep + fc_file

              doors_temp = fds_path + os.sep + 'temp_doors'
              arcpy.FeatureToPoint_management('DR_lyr', doors_temp, point_location="CENTROID")

              rooms_condition = "[FLOOR] = " + str(floor_num) + " AND NOT " + rooms_query
              arcpy.FeatureClassToFeatureClass_conversion(fc_rooms[0], fds_path, 'rooms_temp', rooms_condition)
Example #10
def NumberFeatures(areaToNumber, pointFeatures, numberingField,
                   outputFeatureClass):

    descPointFeatures = arcpy.Describe(pointFeatures)
    arcpy.AddMessage("pointFeatures: {0}".format(
        descPointFeatures.catalogPath))

    # If no output FC is specified, use a temporary one; it will be copied over the input and then deleted.
    overwriteFC = False
    if not outputFeatureClass:
        DEFAULT_OUTPUT_LOCATION = r'%scratchGDB%\tempSortedPoints'
        outputFeatureClass = DEFAULT_OUTPUT_LOCATION
        overwriteFC = True
    else:
        descOutputFeatureClass = arcpy.Describe(outputFeatureClass)
        arcpy.AddMessage("outputFeatureClass: {0}".format(
            descOutputFeatureClass.catalogPath))

    # Sort layer by upper right across and then down spatially
    areaToNumberInMemory = os.path.join("in_memory", "areaToNumber")
    arcpy.CopyFeatures_management(areaToNumber, areaToNumberInMemory)
    areaToNumber = areaToNumberInMemory

    DEBUG = True
    appEnvironment = None
    mxd, df, aprx, mp, mapList = None, None, None, None, None
    pointFeatureName = os.path.basename(str(pointFeatures))
    layerExists = False
    try:
        # Check that area to number is a polygon
        descArea = arcpy.Describe(areaToNumber)
        areaGeom = descArea.shapeType
        arcpy.AddMessage("Shape type: " + str(areaGeom))
        if (descArea.shapeType != "Polygon"):
            raise Exception("ERROR: The area to number must be a polygon.")

        # Check which ArcGIS application is running (Pro or ArcMap)
        appEnvironment = Utilities.GetApplication()
        if DEBUG:
            arcpy.AddMessage("App environment: " + appEnvironment)

        #Getting the layer name from the Table of Contents
        if appEnvironment == Utilities.PLATFORM_PRO:
            from arcpy import mp
            aprx = arcpy.mp.ArcGISProject("CURRENT")
            mapList = aprx.listMaps()[0]
            for lyr in mapList.listLayers():
                if lyr.name == pointFeatureName:
                    layerExists = True
        if appEnvironment == Utilities.PLATFORM_DESKTOP:
            from arcpy import mapping
            mxd = arcpy.mapping.MapDocument('CURRENT')
            df = arcpy.mapping.ListDataFrames(mxd)[0]
            for lyr in arcpy.mapping.ListLayers(mxd):
                if lyr.name == pointFeatureName:
                    layerExists = True

        if not layerExists:
            arcpy.MakeFeatureLayer_management(pointFeatures, pointFeatureName)
        else:
            pointFeatureName = pointFeatures

        # Select all the points that are inside the area. At this point
        # areaToNumber is always the in_memory copy's path (a non-empty
        # string), so the old falsy branch was unreachable and would have
        # failed on .name anyway.
        arcpy.AddMessage(
            "Selecting points from {0} inside of the area {1}".format(
                pointFeatureName, areaToNumber))

        selectionLayer = arcpy.SelectLayerByLocation_management(
            pointFeatureName, "INTERSECT", areaToNumber, "#", "NEW_SELECTION")
        if DEBUG:
            arcpy.AddMessage(
                "Selected " +
                str(arcpy.GetCount_management(pointFeatureName).getOutput(0)) +
                " points")

        arcpy.AddMessage(
            "Sorting the selected points geographically, left to right, top to bottom"
        )
        arcpy.Sort_management(pointFeatureName, outputFeatureClass,
                              [["Shape", "ASCENDING"]])

        if numberingField is None or numberingField == "":
            fnames = [
                field.name for field in arcpy.ListFields(outputFeatureClass)
            ]
            addfield = "Number"
            if addfield in fnames:
                arcpy.AddMessage("Number field is already used")
                numberingField = "Number"
            else:
                arcpy.AddMessage("Add One")
                arcpy.AddMessage(
                    "Adding Number field because no input field was given")
                arcpy.AddField_management(outputFeatureClass, "Number",
                                          "SHORT")
                numberingField = "Number"

        # Number the fields
        arcpy.AddMessage("Numbering the fields")
        i = 1
        cursor = arcpy.UpdateCursor(outputFeatureClass)
        for row in cursor:
            row.setValue(numberingField, i)
            cursor.updateRow(row)
            i += 1
        # Clear the selection
        arcpy.AddMessage("Clearing the selection")
        arcpy.SelectLayerByAttribute_management(pointFeatureName,
                                                "CLEAR_SELECTION")

        # Overwrite the Input Point Features, and then delete the temporary output feature class
        targetLayerName = ""
        if (overwriteFC):
            arcpy.AddMessage(
                "Copying the features to the input, and then deleting the temporary feature class"
            )
            desc = arcpy.Describe(pointFeatures)
            if hasattr(desc, "layer"):
                overwriteFC = desc.layer.catalogPath
            else:
                overwriteFC = desc.catalogPath

            arcpy.AddMessage("what is the numberingField: " +
                             str(numberingField))
            addfield = "Number"
            fnames1 = [field.name for field in arcpy.ListFields(overwriteFC)]
            if addfield in fnames1:
                arcpy.AddMessage("Number field is already used")
            else:
                arcpy.AddMessage(
                    "Adding Number field to overwriteFC due to no input field")
                arcpy.AddField_management(overwriteFC, "Number", "SHORT")
                arcpy.AddMessage("Added Number field to overwriteFC")

            fields = (str(numberingField), "SHAPE@")

            overwriteCursor = arcpy.da.UpdateCursor(overwriteFC, fields)
            for overwriteRow in overwriteCursor:
                sortedPointsCursor = arcpy.da.SearchCursor(
                    outputFeatureClass, fields)
                for sortedRow in sortedPointsCursor:
                    if sortedRow[1].equals(overwriteRow[1]):
                        overwriteRow[0] = sortedRow[0]
                overwriteCursor.updateRow(overwriteRow)
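            # Note: the nested cursor above re-scans the sorted output for
            # every input row (O(n^2)). A hedged faster alternative is one
            # pass building {geometry WKT: number}, then a dict lookup per row.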
            arcpy.Delete_management(outputFeatureClass)
            targetLayerName = pointFeatureName
        else:
            targetLayerName = os.path.basename(str(outputFeatureClass))

        # Workaround: don't set the outputFeatureClass if none was supplied to the tool
        if overwriteFC:
            outputFeatureClass = ''

    except arcpy.ExecuteError:
        # Get the tool error messages
        msgs = arcpy.GetMessages()
        arcpy.AddError(msgs)
        print(msgs)

    except:
        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]

        # Concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(
            sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"

        # Return python error messages for use in script tool or Python Window
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)

        # Print Python error messages for use in Python / Python Window
        print(pymsg + "\n")
        print(msgs)

    return outputFeatureClass
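
# Hedged usage sketch (paths and workspace are illustrative assumptions):
# NumberFeatures(r"C:\data\demo.gdb\AO_Polygon",
#                r"C:\data\demo.gdb\TargetPoints",
#                "Number",
#                r"C:\data\demo.gdb\TargetPoints_Numbered")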
Example #11
import time

import arcpy
from arcpy import env

start = time.time()
arcpy.env.overwriteOutput = True
# Set file workspace
file_workspace = 'B:\\Risk\\Risk.gdb'
env.workspace = file_workspace

# buffer set up
Holdings = 'B:\\Risk\\Risk.gdb\\Holdings\\Holdings_Join'
Holdings_clip = 'B:\\Risk\\Risk.gdb\\Holdings\\Holdings'
distances = [1000, 4000]
unit = "Meters"
# Make a feature layer from Holdings Layer#

arcpy.MakeFeatureLayer_management(Holdings, 'Holdings_Layer')
end = time.time()
print (end - start)
start = time.time()
#open Map Document
mxd = arcpy.mapping.MapDocument(
    'N:\\GIS\\Projects\\AA_Leith_Hawkins_TestBed\\Search_Cursor\\Search_Cursor_mxd.mxd')
df = arcpy.mapping.ListDataFrames(mxd, "Layers")[0]
legend = arcpy.mapping.ListLayoutElements(
        mxd, "LEGEND_ELEMENT", "Legend")[0]
legend.autoAdd = False
end = time.time()
print (end - start)
# SearchCursor over every row in the dataset to interrogate the Holdings reference number
with arcpy.da.SearchCursor(Holdings, ['Holding_Reference_Number']) as Holdings_Ref_cursor:
    for row in Holdings_Ref_cursor:
Example #12
def RotateFeatureClass(inputFC, outputFC, angle=0, pivot_point=None):
    """Rotate Feature Class

    inputFC     Input features
    outputFC    Output feature class
    angle       Angle to rotate, in degrees
    pivot_point X,Y coordinates (as space-separated string)
                Default is lower-left of inputFC

    Because the output feature class no longer has "real" xy locations
    after rotation, it has no coordinate system defined.
    """
    def RotateXY(x, y, xc=0, yc=0, angle=0, units="DEGREES"):
        """Rotate an xy cooordinate about a specified origin
        x,y      xy coordinates
        xc,yc   center of rotation
        angle   angle
        units    "DEGREES" (default) or "RADIANS"
        """
        x = x - xc
        y = y - yc
        # make angle clockwise (like Rotate_management)
        angle = angle * -1
        if units == "DEGREES":
            angle = math.radians(angle)
        xr = (x * math.cos(angle)) - (y * math.sin(angle)) + xc
        yr = (x * math.sin(angle)) + (y * math.cos(angle)) + yc
        return xr, yr
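
    # Worked check: RotateXY(1, 0, angle=90) returns approximately (0.0, -1.0),
    # i.e. a clockwise quarter turn about the origin, like Rotate_management.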

    # temp names for cleanup
    env_file = None
    lyrFC, lyrTmp = [None] * 2  # layers
    tmpFC = None  # temp dataset

    try:
        # process parameters
        try:
            xcen, ycen = [float(xy) for xy in pivot_point.split()]
            pivot_point = xcen, ycen
        except:
            # if pivot point was not specified, get it from
            # the lower-left corner of the feature class
            ext = arcpy.Describe(inputFC).extent
            xcen, ycen = ext.XMin, ext.YMin
            pivot_point = xcen, ycen

        angle = float(angle)

        # set up environment
        env_file = arcpy.CreateScratchName("xxenv", ".xml", "file",
                                           os.environ["TEMP"])
        arcpy.gp.SaveSettings(env_file)

        WKS = env.workspace
        if not WKS:
            if os.path.dirname(outputFC):
                WKS = os.path.dirname(outputFC)
            else:
                WKS = os.path.dirname(arcpy.Describe(inputFC).catalogPath)
        env.workspace = env.scratchWorkspace = WKS

        # Disable any GP environment clips
        arcpy.ClearEnvironment("extent")

        # get feature class properties
        lyrFC = 'lyrFC'
        arcpy.MakeFeatureLayer_management(inputFC, lyrFC)
        dFC = arcpy.Describe(lyrFC)
        shpField = dFC.shapeFieldName
        shpType = dFC.shapeType

        # create temp feature class
        tmpFC = arcpy.CreateScratchName("xxfc", "", "featureclass")

        # Create Feature Class using inputFC as template (so will have "Grid" field)
        arcpy.CreateFeatureclass_management(os.path.dirname(tmpFC),
                                            os.path.basename(tmpFC), shpType,
                                            inputFC)
        lyrTmp = 'lyrTmp'
        arcpy.MakeFeatureLayer_management(tmpFC, lyrTmp)

        ## WORKAROUND: removed below because it was creating a schema lock until Pro/arcpy exited
        ## set up grid field
        #gridField = "Grid"
        #arcpy.AddField_management(lyrTmp, gridField, "TEXT")
        #arcpy.DeleteField_management(lyrTmp, 'ID')

        # rotate the feature class coordinates for each feature, and each feature part

        # open read and write cursors
        updateFields = ['SHAPE@', 'Grid']
        arcpy.AddMessage('Rotating temporary dataset')

        parts = arcpy.Array()
        rings = arcpy.Array()
        ring = arcpy.Array()

        with arcpy.da.SearchCursor(lyrFC, updateFields) as inRows,\
          arcpy.da.InsertCursor(lyrTmp, updateFields) as outRows:
            for inRow in inRows:
                shp = inRow[0]  # SHAPE
                p = 0
                for part in shp:
                    for pnt in part:
                        if pnt:
                            x, y = RotateXY(pnt.X, pnt.Y, xcen, ycen, angle)
                            ring.add(arcpy.Point(x, y, pnt.ID))
                        else:
                            # if we have a ring, save it
                            if len(ring) > 0:
                                rings.add(ring)
                                ring.removeAll()
                    # we have our last ring, add it
                    rings.add(ring)
                    ring.removeAll()
                    # if only one, remove nesting
                    if len(rings) == 1: rings = rings.getObject(0)
                    parts.add(rings)
                    rings.removeAll()
                    p += 1

                # if only one, remove nesting
                if len(parts) == 1: parts = parts.getObject(0)
                if dFC.shapeType == "Polyline":
                    shp = arcpy.Polyline(parts)
                else:
                    shp = arcpy.Polygon(parts)
                parts.removeAll()

                gridValue = inRow[1]  # GRID string
                outRows.insertRow([shp, gridValue])  # write row to output

        arcpy.AddMessage('Merging temporary, rotated dataset with output')
        env.qualifiedFieldNames = False
        arcpy.Merge_management(lyrTmp, outputFC)

    except MsgError as xmsg:
        arcpy.AddError(str(xmsg))
    except arcpy.ExecuteError:
        tbinfo = traceback.format_tb(sys.exc_info()[2])[0]
        arcpy.AddError(tbinfo.strip())
        arcpy.AddError(arcpy.GetMessages())
        numMsg = arcpy.GetMessageCount()
        for i in range(0, numMsg):
            arcpy.AddReturnMessage(i)
    except Exception as xmsg:
        tbinfo = traceback.format_tb(sys.exc_info()[2])[0]
        arcpy.AddError(tbinfo + str(xmsg))
    finally:
        # reset environment
        if env_file: arcpy.gp.LoadSettings(env_file)
        # Clean up temp files
        for f in [lyrFC, lyrTmp, tmpFC, env_file]:
            try:
                if f and arcpy.Exists(f):
                    arcpy.Delete_management(f)
            except:
                pass

        # return pivot point
        try:
            pivot_point = "{0} {1}".format(*pivot_point)
        except:
            pivot_point = None

        return pivot_point
Example #13
def zldj_cal(input_path, dltb_path):
    all_shp = hybasic.getfiles(input_path, "shp")
    gbz_shp = hybasic.filter_file(all_shp, "GBZ")
    merge_layer = scratch_gdb + "/merge23"
    arcpy.Merge_management(gbz_shp, output=merge_layer)

    # out_feature_class = scratch_gdb + "/dissolve_layer"

    # Normalize truncated grade values to the full u"需要提质改造" ("needs upgrading")
    with arcpy.da.UpdateCursor(merge_layer, ["ZLDJ"]) as cursor:
        for row in cursor:
            if row[0] == u"需要提质改" or row[0] == u"需要提质":
                row[0] = u"需要提质改造"
                cursor.updateRow(row)

    out_feature_class = "dissolve_layer"
    arcpy.Dissolve_management(merge_layer, out_feature_class, "ZLDJ")

    # Select by attribute
    # if arcpy.Exists(scratch_gdb+)

    fuhe = "fuhe"
    jibenfuhe = "jibenfuhe"
    xytzgz = "xytzgz"

    for _ in [fuhe, jibenfuhe, xytzgz]:
        try:
            arcpy.Delete_management(_)
            print "delete:", _
        except:
            print 1

    arcpy.MakeFeatureLayer_management(out_feature_class, "lyr")
    print arcpy.Exists("lyr")
    # Some quality grades may be absent from the data
    arcpy.SelectLayerByAttribute_management("lyr", "NEW_SELECTION",
                                            " \"ZLDJ\" = '符合' ")
    # Adding a u prefix to the query string raises an error:
    # arcpy.SelectLayerByAttribute_management("lyr", "NEW_SELECTION", " 'ZLDJ' = u'符合' ")
    arcpy.CopyFeatures_management("lyr", "fuhe")
    arcpy.SelectLayerByAttribute_management("lyr", "NEW_SELECTION",
                                            " \"ZLDJ\" LIKE '基本符%' ")
    arcpy.CopyFeatures_management("lyr", "jibenfuhe")
    arcpy.SelectLayerByAttribute_management("lyr", "NEW_SELECTION",
                                            " \"ZLDJ\" LIKE '需要%' ")
    arcpy.CopyFeatures_management("lyr", "xytzgz")
    print u"分离质量等级成功"

    zldj_layers = []
    if arcpy.Exists(fuhe):
        zldj_layers.append(fuhe)
    if arcpy.Exists(jibenfuhe):
        zldj_layers.append(jibenfuhe)
    if arcpy.Exists(xytzgz):
        zldj_layers.append(xytzgz)

    dltb_fields = arcpy.ListFields(dltb_path)
    dltb_field_names = [i.name for i in dltb_fields]
    print dltb_field_names
    if u"地类编码" in dltb_field_names:
        name = u"地类编码"

    else:
        name = "DLBM"  # 大小写是否有影响?
    print "DLTB_NAME:", name

    dltb = "dltb"
    arcpy.MakeFeatureLayer_management(dltb_path, dltb)
    arcpy.SelectLayerByAttribute_management(dltb, "NEW_SELECTION",
                                            name + " LIKE '01%' ")
    for a_shp in zldj_layers:
        new_name = a_shp + "_DLTB"
        arcpy.Identity_analysis(in_features=a_shp,
                                identity_features=dltb,
                                out_feature_class=new_name)
        print "a_shp:", a_shp
        arcpy.MakeFeatureLayer_management(new_name, "a_shp_2")
        arcpy.SelectLayerByAttribute_management("a_shp_2", "NEW_SELECTION",
                                                name + " LIKE '01%' ")

        arcpy.CopyFeatures_management("a_shp_2", a_shp)

    layername_area = []  # list of [layer name, area] pairs, e.g. [["fuhe", 3455], ["jibenfuhe", 899.976]]
    for i in zldj_layers:
        area = cal_shp_area(i)
        layername_area.append([i, area])

    print layername_area  # [['fuhe', 43179889.3465107], ['jibenfuhe', 28166402.1104696], ['xytzgz', 97004089.0362233]]

    zldj_area = []
    if arcpy.Exists(xytzgz):  # features needing upgrading exist
        area3 = layername_area[-1][1]
        if len(layername_area) > 2:  # the other two grades also exist
            erase_jibenfuhe = "layer0"
            erase_fuhe1 = "layer1"
            erase_fuhe2 = "layer1_2"
            area1 = layername_area[0][1]
            area2 = layername_area[1][1]
            # Erase "needs upgrading" from "basically compliant"
            arcpy.Erase_analysis(in_features=jibenfuhe,
                                 erase_features=xytzgz,
                                 out_feature_class=erase_jibenfuhe)
            area_jibenfuhe = show_shp_area(erase_jibenfuhe)  # area of "basically compliant"
            # Erase against "basically compliant" to keep only "compliant"
            arcpy.Erase_analysis(in_features=fuhe,
                                 erase_features=erase_jibenfuhe,
                                 out_feature_class=erase_fuhe1)
            # area_jibenfu_part1 = area1-area_fuhe1
            # Erase against xytzgz to keep only "compliant"
            arcpy.Erase_analysis(in_features=erase_fuhe1,
                                 erase_features=xytzgz,
                                 out_feature_class=erase_fuhe2)
            area_fuhe = show_shp_area(erase_fuhe2)  # area of "compliant"

            # zldj_area = []  # list for the three quality grades
            zldj_area.append([u"符合", area_fuhe])
            zldj_area.append([u"基本符合", area_jibenfuhe])
            zldj_area.append([u"需要提质改造", area3])
        elif len(layername_area) == 2:
            # only "basically compliant" + upgrading, or "compliant" + upgrading, exist
            erase_ = "layer1_4"
            arcpy.Erase_analysis(in_features=layername_area[0][0],
                                 erase_features=xytzgz,
                                 out_feature_class=erase_)
            erase_area = show_shp_area(erase_)  # the erased area is the true area of the non-upgrading layer
            xytzgz_area = area3
            zldj_area.append([layername_area[0][0], erase_area])
            zldj_area.append([u"需要提质改造", xytzgz_area])
        elif len(layername_area) == 1:
            # only the "needs upgrading" layer exists
            xytzgz_area = layername_area[0][1]
            zldj_area.append([u"需要提质改造", xytzgz_area])
    else:  # no "needs upgrading" layer exists
        if len(layername_area) == 2:
            # both "compliant" and "basically compliant" exist
            erase_fuhe = "layer1_41"
            arcpy.Erase_analysis(in_features=fuhe,
                                 erase_features=jibenfuhe,
                                 out_feature_class=erase_fuhe)
            erase_area_fuhe = show_shp_area(
                erase_fuhe)  # the erased area is the true area of "compliant"

            zldj_area.append([u"符合", erase_area_fuhe])
            zldj_area.append([u"基本符合", layername_area[1][1]])

        else:
            # only one of "compliant" or "basically compliant" exists
            zldj_area.append([layername_area[0][0], layername_area[0][1]])
    total = 0
    for grade_name, area in zldj_area:
        # 0.0015 presumably converts square meters to mu (1 mu ~ 666.67 m^2)
        print grade_name, area * 0.0015
        total += area * 0.0015
    print total

    zldj_area = [round(_[1] * 0.0015, 4) for _ in zldj_area]

    print "____________________________________________________________________"
    print "____________________________________________________________________"
    print "____________________________________________________________________"
    for i in zldj_area:
        print i

    return zldj_area
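Note: cal_shp_area and show_shp_area are defined outside this excerpt. A minimal sketch of what they plausibly do, assuming areas are summed with a SHAPE@AREA cursor and that show_shp_area additionally prints the result:

import arcpy

def cal_shp_area(fc):
    # Sum the polygon areas of a feature class (square meters for a metric CRS)
    return sum(row[0] for row in arcpy.da.SearchCursor(fc, ["SHAPE@AREA"]))

def show_shp_area(fc):
    area = cal_shp_area(fc)
    print fc, area
    return area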
Exemplo n.º 14
0
def main(*argv):
    """ main driver of program """
    try:
        features = str(argv[0])#.split(';')
        in_fields = str(argv[1])
        polygon_grid = argv[2]
        grid_polygon = argv[3]
        out_fc_exists = arcpy.Exists(grid_polygon)
        fc = features

        output_gdb = os.path.dirname(grid_polygon)
        #  Local Variables
        #
        results = []
        #fcs = []
        scratchFolder = env.scratchFolder
        scratchGDB = env.scratchGDB
        #  Logic
        #
        master_times = datetime.datetime.now()

        if not out_fc_exists:
            output_gdb = validate_workspace(wrksp=output_gdb)
            #for fc in features:
            arcpy.CopyFeatures_management(in_features=polygon_grid,
                                          out_feature_class=grid_polygon)
            #fcs.append(grid_polygon)
            grid_polygon = grid_fields(grid=grid_polygon)
            where_clause = None
            grid_sdf = geomotion.SpatialDataFrame.from_featureclass(grid_polygon)

        else:
            arcpy.MakeFeatureLayer_management(grid_polygon, "lyr")
            arcpy.SelectLayerByLocation_management("lyr", "HAVE_THEIR_CENTER_IN", polygon_grid)
            oids = [row[0] for row in arcpy.da.SearchCursor("lyr", "OID@")]
            if len(oids) > 1:
                oids_string = str(tuple(oids))
            else:
                oids_string = '(' + str(oids[0]) + ')'

            where_clause = 'OBJECTID IN ' + oids_string
            grid_sdf = geomotion.SpatialDataFrame.from_featureclass(grid_polygon,
                                        where_clause=where_clause)


        data_sdf = geomotion.SpatialDataFrame.from_featureclass(fc)
        index = data_sdf.sindex
        for idx, row in enumerate(grid_sdf.iterrows()):

            geom = row[1].SHAPE
            oid = row[1].OBJECTID
            ext = [geom.extent.lowerLeft.X, geom.extent.lowerLeft.Y,
                   geom.extent.upperRight.X, geom.extent.upperRight.Y]
            row_oids = list(index.intersect(ext))
            df_current = data_sdf.loc[data_sdf.index.isin(row_oids)]
            sq = df_current['SHAPE'].disjoint(geom) == False
            df_current = df_current[sq].copy()
            if len(df_current) > 0:
                dates = df_current[in_fields].tolist()
                count = len(dates)
                date_list_strings = [d for d in dates]
                date_list = [get_datetime(d) for d in dates]
                year_list = [int(x.year) for x in date_list]
                dom_year, dom_year_count = Counter(year_list).most_common()[0]
                dom_date, dom_date_count = Counter(get_datetime_string(date_list)).most_common()[0]
                count_picket_fences = sum(non_std == datetime.datetime(1902,1,1,0,0) for non_std in date_list)
                count_non_std_dates = sum(non_std == datetime.datetime(1901,1,1,0,0) for non_std in date_list) + count_picket_fences
                date_list_minus = [x for x in date_list if (x != datetime.datetime(1901,1,1,0,0) and x != datetime.datetime(1902,1,1,0,0))]
                if len(date_list_minus)>0:
                    if dom_date == '1902-1-1' or dom_date == '1902-01-01':
                        dom_date = non_std_date
                        dom_year = non_std_year
                        score = 6
                        oldest = min(get_datetime_string(date_list_minus))
                        newest = max(get_datetime_string(date_list_minus))
                        change_list = [diff_date(dd) for dd in date_list_minus]
                        count_2year = sum(x <= 2 for x in change_list)
                        count_5year = sum((x <= 5 and x > 2) for x in change_list)
                        count_10year = sum((x <= 10 and x > 5) for x in change_list)
                        count_15year = sum((x <= 15 and x > 10) for x in change_list)
                        count_15year_plus = sum(x >= 15 for x in change_list)
                    elif dom_date == '1901-1-1' or dom_date == '1901-01-01':
                        dom_date = 'NoInformation'
                        dom_year = 0
                        score = 6
                        oldest = min(get_datetime_string(date_list_minus))
                        newest = max(get_datetime_string(date_list_minus))
                        change_list = [diff_date(dd) for dd in date_list_minus]
                        count_2year = sum(x <= 2 for x in change_list)
                        count_5year = sum((x <= 5 and x > 2) for x in change_list)
                        count_10year = sum((x <= 10 and x > 5) for x in change_list)
                        count_15year = sum((x <= 15 and x > 10) for x in change_list)
                        count_15year_plus = sum(x >= 15 for x in change_list)
                    else:
                        oldest = min(get_datetime_string(date_list_minus))
                        newest = max(get_datetime_string(date_list_minus))
                        change_list = [diff_date(dd) for dd in date_list_minus]
                        count_2year = sum(x <= 2 for x in change_list)
                        count_5year = sum((x <= 5 and x > 2) for x in change_list)
                        count_10year = sum((x <= 10 and x > 5) for x in change_list)
                        count_15year = sum((x <= 15 and x > 10) for x in change_list)
                        count_15year_plus = sum(x >= 15 for x in change_list)
                        score = get_currency_score(dom_year)
                else:
                    if dom_date == '1902-01-01':
                        dom_date = non_std_date
                        dom_year = non_std_year
                        oldest = non_std_date
                        newest = non_std_date
                        change_list = 0
                        count_2year = 0
                        count_5year = 0
                        count_10year = 0
                        count_15year = 0
                        count_15year_plus = 0
                        score = 6
                    else:
                        dom_date = 'NoInformation'
                        dom_year = 0
                        oldest = 'NoInformation'
                        newest = 'NoInformation'
                        change_list = 0
                        count_2year = 0
                        count_5year = 0
                        count_10year = 0
                        count_15year = 0
                        count_15year_plus = 0
                        score = 6

                r = (oid,
                        dom_date,
                        dom_date_count,
                        round(dom_date_count * 100.0 / count,1),
                        dom_year,
                        dom_year_count,
                        round(dom_year_count * 100.0 / count,1),
                        oldest,
                        newest,
                        count_non_std_dates,
                        round(float(count_non_std_dates) * 100.0 / count,1),
                        round(float(count_2year) * 100.0 / count,1),
                        round(float(count_5year) * 100.0 / count,1),
                        round(float(count_10year) * 100.0 / count,1),
                        round(float(count_15year) * 100.0 / count,1),
                        round(float(count_15year_plus) * 100.0 / count,1),
                        int(count),
                        int(score))
                results.append(r)
            else:
                results.append(
                    (oid, "None", 0,0,
                     0,0,0, "None",
                     "None",0,0,
                     0,0,0,
                     0,0,0,0))
            if len(results) > 1000:
                extend_table(rows=results, table=grid_polygon)
                results = []
            del df_current
            del ext
            del geom
            del oid
        if len(results) > 0:
            extend_table(rows=results, table=grid_polygon)
            results = []
        del fc
        del data_sdf
        del index
        #arcpy.SetParameter(4, fcs)
        arcpy.AddMessage("Total Time %s" % (datetime.datetime.now() - master_times))
    except arcpy.ExecuteError:
        line, filename, synerror = trace()
        arcpy.AddError("error on line: %s" % line)
        arcpy.AddError("error in file name: %s" % filename)
        arcpy.AddError("with error message: %s" % synerror)
        arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2))
    except FunctionError as f_e:
        messages = f_e.args[0]
        arcpy.AddError("error in function: %s" % messages["function"])
        arcpy.AddError("error on line: %s" % messages["line"])
        arcpy.AddError("error in file name: %s" % messages["filename"])
        arcpy.AddError("with error message: %s" % messages["synerror"])
        arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
    except:
        line, filename, synerror = trace()
        arcpy.AddError("error on line: %s" % line)
        arcpy.AddError("error in file name: %s" % filename)
        arcpy.AddError("with error message: %s" % synerror)
Exemplo n.º 15
0
def mapMatch(j, tol_rs, snapData, tempData, assignData, tempTable, assignTable,
             searchRadius, currentRoute, currentRouteSearch, networkDataSet,
             roadway, gpsDict, snapDict, acceptDict, gp, n_points, n):
    ## searching near segments for gps_j
    if j not in snapDict:
        snapDict = near_segments(j, tempData, roadway, tempTable, searchRadius,
                                 gpsDict, snapDict)
    ## if there are no near segments for k_j, or k_i doesn't have an FID assigned
    if snapDict[j] == []:
        acceptDict[j] = (gpsDict[j]['gpsPoint'], 0)
        ## print "\n\n**********Accepted: {}({})**********\n\n".format(j,0)
    elif len(snapDict[j]) > 0 and acceptDict[j - 1][1] == 0:
        fid_j = snapDict[j][0][1]
        snap_j = (snapDict[j][0][2], snapDict[j][0][3])
        acceptDict[j] = (snap_j, fid_j)
        ## print "\n\n**********Accepted: {}({})**********\n\n".format(j,fid_j)
    else:
        i = j - 1
        forward = False
        current_j = j
        while True:
            ## solving route ki --> kj
            ## print "\n\n----------------Solving route {} --> {} ----------------\n\n".format(i,j)
            fid_i = acceptDict[i][1]
            snap_i = acceptDict[i][0]
            fid_j = snapDict[j][0][1]
            snap_j = (snapDict[j][0][2], snapDict[j][0][3])

            dist, snapSpeed, avSpeed = route_solver(snap_i, snap_j, i, j,
                                                    snapData, networkDataSet,
                                                    currentRoute,
                                                    currentRouteSearch,
                                                    gpsDict, gp)
            ##print "d:{}-->{},{} [mi] -- snapSpeed: {} [mi/hr] -- avSpeed: {} [mi/hr]".format(i,j,dist,snapSpeed,avSpeed)
            ## the average speed is compared with the measured speed.
            if snapSpeed - tol_rs <= avSpeed <= snapSpeed + tol_rs:
                ## k_i --> k_j
                acceptDict[j] = (snap_j, fid_j)
                ## print "\n\n**********Accepted: {}({}) --> {}({})**********\n\n".format(i,fid_i,j,fid_j)
                solution = True
                break
            else:
                ## searching alternative k_j
                set_fid_j = {fid_j}
                alt_i, alt_j = False, False
                for index_temp in range(len(snapDict[j])):
                    _, fid, snap_x, snap_y = snapDict[j][index_temp]
                    if fid not in set_fid_j:
                        snap_j = (snap_x, snap_y)
                        dist, snapSpeed, avSpeed = route_solver(
                            snap_i, snap_j, i, j, snapData, networkDataSet,
                            currentRoute, currentRouteSearch, gpsDict, gp)
                        ##print "d:{}-->{},{} [mi] -- snapSpeed: {} [mi/hr] -- avSpeed: {} [mi/hr]".format(i,j,dist,snapSpeed,avSpeed)
                        if snapSpeed - tol_rs <= avSpeed <= snapSpeed + tol_rs:
                            fid_j = fid
                            acceptDict[j] = (snap_j, fid_j)
                            #print "\n\n**********Accepted (alt_j): {}({}) --> {}({})**********\n\n".format(i,fid_i,j,fid_j)
                            solution = True
                            alt_j = True
                            break
                        else:
                            set_fid_j.add(fid)

                ## searching alternative k_i
                if not alt_j:
                    alt_i = False
                    Radius_val = int(searchRadius.split(" ")[0])
                    if Radius_val <= 30:
                        #print "Radius_val j:{}".format(Radius_val)
                        Radius_val = Radius_val + 5
                        searchRadius = str(
                            Radius_val) + " " + searchRadius.split(" ")[1]
                        snapDict = near_segments(j, tempData, roadway,
                                                 tempTable, searchRadius,
                                                 gpsDict, snapDict)
                    elif i > 1:
                        snap_im = acceptDict[i - 1][0]
                        fid_j = snapDict[j][0][1]
                        snap_j = (snapDict[j][0][2], snapDict[j][0][3])
                        set_fid_i = {fid_i}
                        for _, fid, snap_x, snap_y in snapDict[i]:
                            if fid not in set_fid_i:
                                snap_i = (snap_x, snap_y)
                                dist_A, snapSpeed_A, avSpeed_A = route_solver(
                                    snap_im, snap_i, i - 1, i, snapData,
                                    networkDataSet, currentRoute,
                                    currentRouteSearch, gpsDict, gp)
                                ## print "d:{}-->{},{} [mi] -- snapSpeed: {} [mi/hr] -- avSpeed: {} [mi/hr]".format(i-1,i,dist_A,snapSpeed_A,avSpeed_A)
                                dist_B, snapSpeed_B, avSpeed_B = route_solver(
                                    snap_i, snap_j, i, j, snapData,
                                    networkDataSet, currentRoute,
                                    currentRouteSearch, gpsDict, gp)
                                ## print "d:{}-->{},{} [mi] -- snapSpeed: {} [mi/hr] -- avSpeed: {} [mi/hr]".format(i,j,dist_B,snapSpeed_B,avSpeed_B)
                                if (snapSpeed_A - tol_rs <= avSpeed_A <=
                                        snapSpeed_A + tol_rs) and (
                                            snapSpeed_B - tol_rs <= avSpeed_B
                                            <= snapSpeed_B + tol_rs):
                                    fid_i = fid
                                    acceptDict[i] = (snap_i, fid_i)
                                    acceptDict[j] = (snap_j, fid_j)
                                    ## print "\n\n**********Accepted (alt_i): {}({}) --> {}({})**********\n\n".format(i,fid_i,j,fid_j)
                                    solution = True
                                    alt_i = True
                                    break
                                else:
                                    set_fid_i.add(fid)
                ## no more alt_i or alt_j candidates
                Radius_val = int(searchRadius.split(" ")[0])
                if Radius_val <= 30 and not alt_i and not alt_j:
                    #print "Radius_val i:{}".format(Radius_val)
                    Radius_val = Radius_val + 5
                    searchRadius = str(Radius_val) + " " + searchRadius.split(
                        " ")[1]
                    snapDict = near_segments(j, tempData, roadway, tempTable,
                                             searchRadius, gpsDict, snapDict)
                elif not alt_i and not alt_j:
                    ## if we have more points to check
                    if j - i <= n_points:
                        if not forward:
                            ## searching a snap point next to j.
                            j = j + 1
                            if j > n:
                                solution = False
                                acceptDict[current_j] = (
                                    gpsDict[current_j]['gpsPoint'], 0)
                                break
                            else:
                                if j not in snapDict:
                                    snapDict = near_segments(
                                        j, tempData, roadway, tempTable,
                                        searchRadius, gpsDict, snapDict)
                                not_j = False
                                while len(snapDict[j]) == 0:
                                    j = j + 1
                                    if j > n:
                                        not_j = True
                                        break
                                    if j not in snapDict:
                                        snapDict = near_segments(
                                            j, tempData, roadway, tempTable,
                                            searchRadius, gpsDict, snapDict)
                                if not_j:
                                    solution = False
                                    acceptDict[current_j] = (
                                        gpsDict[current_j]['gpsPoint'], 0)
                                    break
                                forward = True
                        else:
                            i = i - 1
                            if i < 1:
                                solution = False
                                acceptDict[current_j] = (
                                    gpsDict[current_j]['gpsPoint'], 0)
                                break
                            forward = False
                    else:
                        solution = False
                        acceptDict[current_j] = (
                            gpsDict[current_j]['gpsPoint'], 0)
                        ## print "\n\n**********Accepted (not solved): {}({})**********\n\n".format(current_j,0)
                        break
                else:
                    break
        if solution:
            ## if there are points between ki and kj
            if j - i > 1:
                assignList = []
                arcpy.MakeFeatureLayer_management(currentRouteSearch,
                                                  "routeIn")
                arcpy.MakeFeatureLayer_management(roadway, "roadway")
                arcpy.SelectLayerByLocation_management(
                    "roadway", 'SHARE_A_LINE_SEGMENT_WITH', "routeIn")
                for row in arcpy.da.SearchCursor("roadway", ["OBJECTID"]):
                    assignList.append(row[0])
                arcpy.SelectLayerByAttribute_management(
                    "roadway", "CLEAR_SELECTION")
                ## print "{}-->{} - Assign List:{}".format(i,j,assignList)
                for oid in range(i + 1, j):
                    ## print "i:{} - j:{}".format(i,j)
                    ## if the point is already in the snap dictionary,
                    ## the closest candidate is assigned.
                    oidIsAssign = False
                    if oid in snapDict:
                        ## print "{} - near: {}".format(oid,snapDict[oid])
                        if len(snapDict[oid]) > 0:
                            fid_oid = snapDict[oid][0][1]
                        else:
                            fid_oid = 0
                        if fid_oid in assignList:
                            snap_oid = (snapDict[oid][0][2],
                                        snapDict[oid][0][3])
                            acceptDict[oid] = (snap_oid, fid_oid)
                            oidIsAssign = True
                        else:
                            oidIsAssign = False

                    ## if the point is not in the snap dictionary,
                    ## the closest location on the route's segments is assigned.
                    if not oidIsAssign:
                        upCursor = arcpy.da.UpdateCursor(
                            assignData, "SHAPE@XY")
                        row = upCursor.next()
                        ## print oid
                        row[0] = (gpsDict[oid]["gpsPoint"][0],
                                  gpsDict[oid]["gpsPoint"][1])

                        upCursor.updateRow(row)
                        del upCursor
                        arcpy.GenerateNearTable_analysis(
                            assignData, "roadway", assignTable, "", "LOCATION",
                            "NO_ANGLE", "CLOSEST")
                        with arcpy.da.SearchCursor(
                                assignTable,
                            ["NEAR_DIST", "NEAR_FID", "NEAR_X", "NEAR_Y"
                             ]) as assignCursor:
                            row = assignCursor.next()
                            ## print row
                            acceptDict[oid] = ((row[2], row[3]), row[1])
    return acceptDict
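Note: near_segments is defined outside this excerpt. A minimal sketch of the behavior the calls above imply - populate snapDict[j] with (distance, FID, x, y) candidates within searchRadius - assuming tempData holds a single working point and tempTable receives the near table:

import arcpy

def near_segments(j, tempData, roadway, tempTable, searchRadius,
                  gpsDict, snapDict):
    # Move the single working point onto gps_j ...
    with arcpy.da.UpdateCursor(tempData, "SHAPE@XY") as cursor:
        for row in cursor:
            row[0] = gpsDict[j]['gpsPoint']
            cursor.updateRow(row)
    # ... then list every road segment within the search radius.
    arcpy.GenerateNearTable_analysis(tempData, roadway, tempTable,
                                     searchRadius, "LOCATION", "NO_ANGLE",
                                     "ALL")
    candidates = []
    with arcpy.da.SearchCursor(
            tempTable, ["NEAR_DIST", "NEAR_FID", "NEAR_X", "NEAR_Y"]) as sc:
        for row in sc:
            candidates.append(row)
    snapDict[j] = sorted(candidates)  # closest candidate first
    return snapDict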
Exemplo n.º 16
0
        writeMsg("\nStarting Batch job:  " + check + " at " + str(now)[:-7])
        res = arcpy.ExecuteReviewerBatchJob_Reviewer(reviewer_db, session,
                                                     use_rbj, production_db,
                                                     area)
        arcpy.Delete_management("in_memory")
        now = datetime.datetime.now()
        writeMsg("\nFinished at:  \t\t" + str(now)[:-7])
    except Exception as err:
        writeMsg("\n Batch job did not work.\n\n")
        writeMsg(str(err))

# Set variables to run Locate Events along Route tool.
# Start by making a copy of the LRSN Road Network, filtered with the timestamp definition query.
arcpy.MakeFeatureLayer_management(
    production_db + "/INVDB.LRSN_RoadNetwork",
    'useMe',
    where_clause=
    "(FromDate is null or FromDate<=CURRENT_TIMESTAMP) and (ToDate is null or ToDate>CURRENT_TIMESTAMP)"
)
arcpy.CopyFeatures_management('useMe', reviewer_db + '/useMe')
in_features = reviewer_db + "/REVDATASET/REVTABLELINE"
in_routes = reviewer_db + '/useMe'
route_id_field = "ROUTEID"
radius_or_tolerance = "1 Feet"
point_radius = "20 Feet"
out_table_line = reviewer_db + "/LineRT"
props_line = "RID LINE FMEAS TMEAS"
# arcpy.env.overwriteOutput = "true"
# Write log of Locate Events tool
now = datetime.datetime.now()
writeMsg("\nStarting Line Table at: " + str(now)[:-7])
arcpy.LocateFeaturesAlongRoutes_lr(in_features, in_routes, route_id_field,
                                   radius_or_tolerance, out_table_line,
                                   props_line)
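Note: writeMsg is defined outside this excerpt; a plausible minimal version (the log file handle is an assumption):

def writeMsg(msg):
    # Hypothetical helper: echo to the geoprocessing messages and append to a log file
    arcpy.AddMessage(msg)
    with open(log_file, 'a') as log:  # log_file is assumed to be set elsewhere
        log.write(msg + '\n')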
Exemplo n.º 17
0
    def _makeFeatureLayer(self):
        pp = self.ProgressPrinter.newProcess(inspect.stack()[0][3], 1,
                                             1).start()
        arcpy.MakeFeatureLayer_management(self.XYgrid, self.XYgrid_temp)
        pp.finish()
Exemplo n.º 18
0
out_layer = "station_layer"
saved_layer = outdir + os.path.normpath("/station.lyrx")
arcpy.MakeXYEventLayer_management(table, in_x_field, in_y_field, out_layer,
                                  "4326", "")
# Save to a layer file
arcpy.SaveToLayerFile_management(out_layer, saved_layer)
# Convert the layer to a shapefile

out_path = outdir
out_name = 'stations.shp'
arcpy.FeatureClassToFeatureClass_conversion(saved_layer, out_path, out_name)

# Want to select only assault cases
crimes = outdir + os.path.normpath('/crimes.shp')
# Make a layer
arcpy.MakeFeatureLayer_management(crimes, "projlyr")
# Build the selection clause for assault cases

sql_clause = "Primary_Ty = 'ASSAULT'"
out_path = outdir
out_name = 's_crimes.shp'
# Export only the features matching the SQL clause
arcpy.FeatureClassToFeatureClass_conversion('projlyr', out_path, out_name,
                                            sql_clause)
arcpy.Delete_management(crimes)
# We should project this properly. Give them the projected file and the police station file

# Set folder path:
police_stations = outdir + os.path.normpath('/stations.shp')
assaults = outdir + os.path.normpath('/s_crimes.shp')
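Note: the projection step is left as a to-do above. A hedged sketch of projecting both shapefiles; the target CRS (EPSG 3435, NAD83 Illinois East State Plane, assuming Chicago crime data), the output names, and the datum transformation choice are assumptions:

import arcpy

target_sr = arcpy.SpatialReference(3435)  # assumed target CRS
for shp in (police_stations, assaults):
    # WGS84 source (the XY event layer used EPSG 4326), so a transformation is needed
    arcpy.Project_management(shp, shp.replace('.shp', '_prj.shp'), target_sr,
                             "WGS_1984_(ITRF00)_To_NAD_1983")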
Exemplo n.º 19
0
WT_Pipe_main_surf_test = FC_FacAllSite_V_Select
WT_Pipe_main_surf_test__5_ = FC_FacAllSite_V_Select
WT_Pipe_main_surf_test__2_ = FC_FacAllSite_V_Select
WT_Pipe_main_surf_test__7_ = WT_Pipe_main_surf_Layer1__2_
WT_Pipe_main_surf_test__6_ = WT_Pipe_main_surf_test
WT_Pipe_main_surf_test__3_ = WT_Pipe_main_surf_test__5_
WT_Pipe_main_surf_test__8_ = WT_Pipe_main_surf_test__2_
WT_Pipe_main_surf_test__10_ = "\\\\watis\\public\\InternsSpring2017\\Spring2017_WT_GeoDb.mdb\\Spring2017_WT\\WT_Pipe_main_surf_test"

# Process: Calculate Field
arcpy.CalculateField_management(WT_Pipe_main_surf, "Risk_Park", "1", "PYTHON",
                                "")

# Process: Make Feature Layer
arcpy.MakeFeatureLayer_management(
    WT_Pipe_main_surf__2_, WT_Pipe_main_surf_Layer1, "", "",
    "OBJECTID OBJECTID VISIBLE NONE;GID GID VISIBLE NONE;LAYER LAYER VISIBLE NONE;PIPEID PIPEID VISIBLE NONE;MATERIAL MATERIAL VISIBLE NONE;MaterialDesc MaterialDesc VISIBLE NONE;DIAMETER DIAMETER VISIBLE NONE;MEASUREDLE MEASUREDLE VISIBLE NONE;ACTUALLENG ACTUALLENG VISIBLE NONE;INSTALLATI INSTALLATI VISIBLE NONE;DRAWINGNO DRAWINGNO VISIBLE NONE;SOURCE SOURCE VISIBLE NONE;PressureZo PressureZo VISIBLE NONE;RiskCondition RiskCondition VISIBLE NONE;RiskFactor RiskFactor VISIBLE NONE;RiskIndex RiskIndex VISIBLE NONE;Shape Shape VISIBLE NONE;Z_Min Z_Min VISIBLE NONE;Z_Max Z_Max VISIBLE NONE;Z_Mean Z_Mean VISIBLE NONE;SLength SLength VISIBLE NONE;Min_Slope Min_Slope VISIBLE NONE;Max_Slope Max_Slope VISIBLE NONE;Avg_Slope Avg_Slope VISIBLE NONE;Shape_Length Shape_Length VISIBLE NONE;Risk_Pipe Risk_Pipe VISIBLE NONE;Risk_TruckRoute Risk_TruckRoute VISIBLE NONE;Risk_School Risk_School VISIBLE NONE;Risk_Business Risk_Business VISIBLE NONE;Risk_Facility Risk_Facility VISIBLE NONE;Risk_Park Risk_Park VISIBLE NONE;Risk_PumpStation Risk_PumpStation VISIBLE NONE;Risk_Creek Risk_Creek VISIBLE NONE;Risk_Diameter Risk_Diameter VISIBLE NONE;Likelihood Likelihood VISIBLE NONE;Consequence Consequence VISIBLE NONE;Risk_Index Risk_Index VISIBLE NONE"
)

# Process: Select
arcpy.Select_analysis(FC_FacAllSite_V, FC_FacAllSite_V_Select,
                      "[TYPE] = 'Park'")

# Process: Select Layer By Location
arcpy.SelectLayerByLocation_management(WT_Pipe_main_surf_Layer1,
                                       "WITHIN_A_DISTANCE",
                                       FC_FacAllSite_V_Select, "1000 Feet",
                                       "NEW_SELECTION", "NOT_INVERT")

# Process: Calculate Field (2)
arcpy.CalculateField_management(WT_Pipe_main_surf_Layer1__2_, "Risk_Park", "2",
                                "PYTHON", "")
Exemplo n.º 20
0
def create_gdb(out_folder, out_name, outpath):
    if not arcpy.Exists(outpath):
        arcpy.CreateFileGDB_management(out_folder, out_name, "CURRENT")


start_time = datetime.datetime.now()
print "Start Time: " + start_time.ctime()
today = datetime.datetime.today()
date = today.strftime('%Y%m%d')

# Creates the output gdb if it does not exist
create_gdb(os.path.dirname(out_vector_projected),
           os.path.basename(out_vector_projected), out_vector_projected)

# Creates feature layers of the county (sum) and huc vector files to be used in all of the intersections
arcpy.MakeFeatureLayer_management(in_sum_file, "fc")
arcpy.MakeFeatureLayer_management(in_huc_vector, "huc_fc")

# set workspace in the environmental setting in to get a list is of species files to run the intersect on
arcpy.env.workspace = invector_location
list_species_vector = arcpy.ListFeatureClasses()
print(list_species_vector)  # Print the list of files that will be intersected

# Loop on each species composite file, performs the intersections in memory, add the ID field for each intersection so
# data can be extracted for the different data sources
for species_comp in list_species_vector:
    state_sp = datetime.datetime.now()

    # Delete the previous species layer stored in memory
    arcpy.Delete_management("in_spe_vector")
    arcpy.Delete_management("in_memory\\vector_" + str(species_comp))
Exemplo n.º 21
0
def clean_unionfiles(outfc, final, group, df):
    listfields = [f.name for f in arcpy.ListFields(outfc)]
    print listfields

    ent_fields = []
    group_spe = []

    arcpy.Delete_management("out")
    arcpy.MakeFeatureLayer_management(outfc, "out")

    for field in listfields:
        if field.startswith('EntityID'):
            ent_fields.append(field)
    # ent_fields.append('OBJECTID')
    ent_fields.append("ZoneID")
    print ent_fields

    arcpy.AddField_management("out", 'ZoneSpecies', "TEXT", "", "", "1000", "", "NULLABLE", "NON_REQUIRED", "")
    arcpy.AddField_management(outfc, "ZoneID", "DOUBLE")

    with arcpy.da.UpdateCursor(outfc, ['OBJECTID', 'ZoneID']) as cursor:  # NOTE: confirm OBJECTID is the key ID for this file
        for row in cursor:
            row[1] = row[0]
            cursor.updateRow(row)
    zonesp = {}
    with arcpy.da.SearchCursor(outfc, ent_fields) as cursor:
        for row in cursor:
            listsp = []
            for field in ent_fields:
                index_f = ent_fields.index(field)
                if field == 'ZoneID':
                    zoneid = row[index_f]
                else:
                    ent = row[index_f]
                    if str(ent) == '':
                        continue
                    else:
                        listsp.append(ent)
                        if str(ent) not in group_spe:
                            group_spe.append(ent)
            # print listsp
            zonesp[zoneid] = listsp
        del cursor, listsp

    with arcpy.da.UpdateCursor("out", ['ZoneID', 'ZoneSpecies']) as cursor:
        for row in cursor:
            listsp = zonesp[row[0]]
            row[1] = str(listsp)
            cursor.updateRow(row)
        del cursor

    delfields = [f.name for f in arcpy.ListFields(outfc) if not f.required]
    delfields.remove('ZoneSpecies')
    layer = "temp_lyr"
    arcpy.MakeFeatureLayer_management(outfc, layer)

    desc = arcpy.Describe(layer)
    field_info = desc.fieldInfo
    # placeholder for the field-info string - hiding fields this way is much
    # faster than deleting them
    field_info_str = ''
    try:
        for index in range(0, field_info.count):
            field_nm = ("{0}".format(field_info.getFieldName(index)))
            if field_nm in delfields:
                # add the field to the field-info string as "<name> <name> HIDDEN;"
                field_info_str += field_nm + ' ' + field_nm + ' HIDDEN;'
                # print("\tVisible:    {0}".format(field_info.getVisible(index)))
    except:
        pass
    # print field_info_str
    field_info_str = field_info_str.rstrip(';')
    arcpy.MakeFeatureLayer_management(outfc, "lyr", field_info=field_info_str)
    arcpy.CopyFeatures_management("lyr", final)
    arcpy.Delete_management("temp_lyr")
    arcpy.Delete_management("lyr")

    group_spe = list(set(group_spe))
    count = len(group_spe)
    remaining = [None] * (1000 - count)  # every column needs 1000 rows - pad the rest with None
    mergelist = group_spe + remaining
    series_sp = pd.Series(mergelist)
    df[group] = series_sp.values
Exemplo n.º 22
0
log = open(directory + '\\log' + str(int(time()))[-8:] + '.txt', 'a+')
log.write('------------------------------------------------------------------------------------------' + '\n')
log.write('Event log for least cost path analysis between locations in: ' + '\n')
log.write(fc_one + '\n')
log.write(fc_two + '\n')
log.write('Event log created: ' + asctime() + '\n')
log.write('------------------------------------------------------------------------------------------' + '\n')

# Start the analysis: compute path-distance and backlink rasters for each location in fc_one, and then the cost path from
# each location in fc_two back to each location in fc_one.
with arcpy.da.SearchCursor(fc_one, [fc_one_loc_name, fc_one_loc_filename]) as cursor:
    for row in cursor:
        loc_one_name = row[0]
        loc_one_filename = row[1]
        print('Calculating path distance and backlink raster for site: ' + loc_one_name)
        arcpy.MakeFeatureLayer_management(fc_one, 'source',
                                          '"{}" = \'{}\''.format(fc_one_loc_filename, loc_one_filename))
        pd_raster = path_distance('source', digital_elevation_model, vertical_factor, loc_one_filename)
        in_cost_backlink_raster = directory + r'\backlink\bl_' + loc_one_filename

        # use distinct cursor/row names so the outer cursor's variables aren't shadowed
        with arcpy.da.SearchCursor(fc_two, [fc_two_loc_name, fc_two_loc_filename]) as dest_cursor:
            for dest_row in dest_cursor:
                start_subtime = time()
                loc_two_name = dest_row[0]
                loc_two_filename = dest_row[1]
                arcpy.MakeFeatureLayer_management(fc_two, 'destination',
                                                  '"{}" = \'{}\''.format(fc_two_loc_filename, loc_two_filename))
                out_cost_path = cost_path('destination', pd_raster, in_cost_backlink_raster)
                convert(out_cost_path, loc_one_filename, loc_two_filename, loc_one_name, loc_two_name)
                end_subtime = time()
                subtime = end_subtime - start_subtime
                print('Finished generating least cost path between ' + loc_one_name + ' and ' + loc_two_name +
                      ' in ' + str(round(subtime, 1)) + ' seconds')
Exemplo n.º 23
0
        # The first field in each row, the subdivision field, gets the user-input subdivision
        row[0] = subdivision
        # The second field in each row, the WO number field, gets the user-input WO number
        row[1] = wonumber
        # update the row
        cursor.updateRow(row)
# delete the cursor
del cursor

#-----------------------------------------------------------------------

# This next section will iterate through a point file containing lot numbers and input those lot numbers
# into the house number field in our new feature class, fcUpdate

# Make a layer from previously created new feature class
arcpy.MakeFeatureLayer_management(fcUpdate, "Parcels_Temp")

# Establish Field Mapping
# NOTE: This field mapping section is leftover code. I could not get the field mappings to map properly
#  and instead used delete fields as mentioned below.
fieldMappings = arcpy.FieldMappings()
# Add field map objects for lot numbers (TEXTSTRING) and expected output field (HouseNumber)
fieldMappings.addTable(Lotnumber)
fieldMappings.addTable(fcUpdate)

# Create a list of only fields we want to keep
TemplateList = arcpy.ListFields(fcUpdate)
keeper = ["TEXTSTRING"]
for field in TemplateList:
    keeper.append(field.name)  # append names, not Field objects, to match "TEXTSTRING"
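Note: the field-mapping attempt above was abandoned per the comment. For reference, a hedged sketch of how unwanted field maps are usually pruned before a conversion, assuming keeper holds field names:

# Remove every field map whose output field is not in keeper; iterate in
# reverse so removal does not shift the remaining indices.
for i in reversed(range(fieldMappings.fieldCount)):
    if fieldMappings.fields[i].name not in keeper:
        fieldMappings.removeFieldMap(i)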
Exemplo n.º 24
0
if Output_PGDB == '#' or not Output_PGDB:
    Output_PGDB = GISWorkspace # provide a default value if unspecified

arcpy.env.workspace = Output_PGDB
print ("Workspace set to "+Output_PGDB)
 
# Local variables, text manipulation for filenames:
ScaleFactor = SlidingDistance.replace(" ","_")
ScaleFactor = ScaleFactor.replace(".","_")
AccidentBuffer = "AccidentBuffer"+ScaleFactor
Buffer1Join = "Buffer1Join"+ScaleFactor
CrashHotSpot = "CrashHotSpot"+ScaleFactor

print "will now process data to perform "+CrashHotSpot
# Process: Buffer
arcpy.Buffer_analysis(Accident_Events, AccidentBuffer, SlidingDistance, "FULL", "ROUND", "NONE", "")
print AccidentBuffer+" created in "+Output_PGDB

# Process: Spatial Join
arcpy.SpatialJoin_analysis(AccidentBuffer, Accident_Events, Buffer1Join, "JOIN_ONE_TO_ONE", "KEEP_COMMON", "", "INTERSECT", "", "")
print Buffer1Join+" created in "+Output_PGDB

# Process: Feature To Point
arcpy.FeatureToPoint_management(Buffer1Join, CrashHotSpot, "CENTROID")
print CrashHotSpot + " created in " + Output_PGDB + ", process completed."

# Add the point data to the map
arcpy.MakeFeatureLayer_management(CrashHotSpot, CrashHotSpot, "#", Output_PGDB)
arcpy.RefreshActiveView()
arcpy.RefreshTOC()
Exemplo n.º 25
0
    uc = arcpy.da.UpdateCursor(fc, instfd)
    for row in uc:
        if coord.FID == row[0]:
            print coord.FID, coord.LclCordE, coord.LclCordN
            newPt = [coord.FID, str(coord.LclCordE), str(coord.LclCordN)]
            uc.updateRow(newPt)
    del uc

# Create a new shapefile with transformed geometries
for coord in listPoints:
    geometry = coord.BuildGeometry(coord)
    pntGemtry.append(geometry)

# If the result shapefile already exists, clear the result folder first
if os.path.isfile(resultfile):
    for f in glob.glob(resultfd + '/*'):
        os.remove(f)
arcpy.CopyFeatures_management(pntGemtry, of)

#for field in fieldList:
    #if field.name != 'FID' and field.name != 'Shape':
        #arcpy.AddField_management(resultfile, field.name,field.type,field.precision,field.scale,field.length,field.aliasName,field.isNullable,field.required,field.domain)

arcpy.MakeFeatureLayer_management(fc, "pk")
arcpy.JoinField_management(resultfile,'FID',"pk",'FID')
arcpy.DeleteField_management(resultfile,'id')

print "\nProgram Completed, please, check data in result folder"
Exemplo n.º 26
0
def main():

    # Initialize Variables
    arcpy.env.overwriteOutput = True
    watershed_list = []
    to_merge_tor = []
    to_merge_bor = []
    to_merge_network = []
    temps_to_delete = []
    watershed_layer = "Watershed"
    arcpy.MakeFeatureLayer_management(watersheds, watershed_layer)

    # Remove all existing content
    for folder in get_folder_list(root_folder, True):
        remove_folder(folder)

    # Get the name of every watershed
    # TODO make more general
    for row in arcpy.da.SearchCursor(watersheds, ["Name"]):

        watershed_list.append(row[0])

    # This loops for each watershed
    for watershed in watershed_list:

        arcpy.AddMessage("Starting " + watershed + "...")

        # Create folder structure within root folder for this watershed
        watershed_folder = make_structure(root_folder, watershed)

        # Get the boundary of this watershed
        query = 'NAME = \'' + watershed + '\''
        arcpy.SelectLayerByAttribute_management(watershed_layer,
                                                'NEW_SELECTION', query)
        clipped_watershed = os.path.join(watershed_folder, "Inputs",
                                         "Watershed_Boundary",
                                         "Watershed_Boundary.shp")
        arcpy.CopyFeatures_management(watershed_layer, clipped_watershed)
        arcpy.SelectLayerByAttribute_management(watershed_layer,
                                                "CLEAR_SELECTION")

        # Clip the TOR points to this watershed and save them
        tor_save_location = os.path.join(watershed_folder, "Inputs", "Points",
                                         "TOR_Points.shp")
        tor_temp_location = os.path.join(watershed_folder, "Inputs", "Points",
                                         "TOR_Points_Temp.shp")
        temps_to_delete.append(tor_temp_location)
        arcpy.Clip_analysis(tor_points, clipped_watershed, tor_temp_location)

        # Only save one point per reach and site
        year_dif_field = create_year_distance(tor_temp_location, "yr",
                                              data_year)
        delete_identical(tor_temp_location, "SiteID", year_dif_field, "RchID",
                         tor_save_location)
        to_merge_tor.append(tor_save_location)

        # Clip the BOR points to this watershed and save them
        bor_save_location = os.path.join(watershed_folder, "Inputs", "Points",
                                         "BOR_Points.shp")
        bor_temp_location = os.path.join(watershed_folder, "Inputs", "Points",
                                         "BOR_Points_Temp.shp")
        temps_to_delete.append(bor_temp_location)
        arcpy.Clip_analysis(bor_points, clipped_watershed, bor_temp_location)

        # Only save one point per reach and site
        year_dif_field = create_year_distance(bor_temp_location, "yr",
                                              data_year)
        delete_identical(bor_temp_location, "SiteID", year_dif_field, "RchID",
                         bor_save_location)
        to_merge_bor.append(bor_save_location)

        # Clip the stream_network to this watershed and save it
        stream_save_location = os.path.join(watershed_folder, "Inputs",
                                            "Stream_Network",
                                            "Stream_Network.shp")
        arcpy.Clip_analysis(stream_network, clipped_watershed,
                            stream_save_location)
        # TODO make more general
        to_merge_network.append(stream_save_location)

    arcpy.AddMessage("Starting Project Wide...")

    # Make a folder to contain Project Wide outputs and inputs
    project_folder = make_structure(root_folder, "00_ProjectWide")

    arcpy.AddMessage("\t Saving TOR Points...")
    # Merge every Watershed's TOR points, and save it to the ProjectWide folder
    tor_save_location = os.path.join(project_folder, "Inputs", "Points",
                                     "TOR_Points.shp")
    arcpy.Merge_management(to_merge_tor, tor_save_location)

    arcpy.AddMessage("\t Saving BOR Points...")
    # Merge every Watershed's BOR points, and save it to the ProjectWide folder
    bor_save_location = os.path.join(project_folder, "Inputs", "Points",
                                     "BOR_Points.shp")
    arcpy.Merge_management(to_merge_bor, bor_save_location)

    arcpy.AddMessage("\t Saving Stream Network...")

    # Take Stream Network, and save it to the ProjectWide folder
    stream_save_location = os.path.join(project_folder, "Inputs",
                                        "Stream_Network", "Stream_Network.shp")
    arcpy.Copy_management(stream_network, stream_save_location)

    arcpy.AddMessage("\t Saving Watersheds...")
    # Take Watershed Boundaries, and save it to the ProjectWide folder
    wat_save_location = os.path.join(project_folder, "Inputs",
                                     "Watershed_Boundary",
                                     "Watershed_Boundary.shp")
    arcpy.Copy_management(watersheds, wat_save_location)

    delete_temps(temps_to_delete)

    finish()
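Note: delete_temps and finish are defined outside this excerpt; delete_temps plausibly just removes each temporary shapefile collected above:

def delete_temps(temps_to_delete):
    # Hypothetical cleanup helper for the *_Temp.shp files created per watershed
    for temp in temps_to_delete:
        if arcpy.Exists(temp):
            arcpy.Delete_management(temp)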
Exemplo n.º 27
0
def addressParcels_Generic(newParcels, parcelFld, parcelAddrFld):

    print(' ** Getting other addresses from Parcels that are not in the address points')

    exp = "{0} IS NULL AND {1} <> '' AND {1} IS NOT NULL".format('PARCEL_ADD', parcelAddrFld)
    print ('Where Clause is', exp)

    arcpy.MakeFeatureLayer_management(newParcels, 'newParcelsFl', exp)
    print(arcpy.GetCount_management('newParcelsFl'),' records with Parcel addresses and no Address Point')

    #  CALC PARCEL_ADD & OrigAddress
    def calcAddress():

        print(' ** Calculating PARCEL_ADD & OrigAddress for parcels that do not have address points')

        with arcpy.da.UpdateCursor('newParcelsFl', ['PARCEL_ADD', 'OrigAddress', parcelAddrFld]) as rows:

            for row in rows:

                if row[2] is not None and row[2].split(' ')[0].isdigit():

                    row[0] = row[2] #.strip().replace('  ',' ')     #PARCEL_ADD
                    row[1] = row[2]     #OrigAddress

                rows.updateRow(row)

    calcAddress()

    arcpy.Delete_management('newParcelsFl')

    # Clean Addresses
    def cleanAddresses():

        print(' * Cleaning Addresses')

        def SkipName(value):
            skipNameList = ['1940 E 5625 S', '430 E 1525 N', '530 E 3625 N', '890 E 2675 N', '930 E 2675 N']
            return any(value.find(e) > -1 for e in skipNameList)

        exp = "{0} IS NOT NULL AND {1} IS NULL".format('PARCEL_ADD', 'PT_ADDRESS')
        print ('NEW Where Clause is', exp)

        with arcpy.da.UpdateCursor(newParcels, ['PARCEL_ADD', 'OrigAddress', 'OID@'], exp) as rows:

            for row in rows:

                if SkipName(row[1]):
                    row[0] = row[1].replace("ROAD","RD").replace("LANE","LN").replace("DRIVE","DR").replace("STREET","ST").replace("CIRCLE", "CIR")\
                        .strip().upper().replace('  ', ' ')

                else:

                    try:
                        address = address_parser.Address(row[1])
                        row[0] = address.normalized

                    except:
                        print(' * Problem with ObjectID:', row[2], '| Original Address:', row[1])

                        row[0] = row[1].replace('ROAD', 'RD').replace('LANE', 'LN').replace('DRIVE', 'DR').replace('STREET', 'ST').replace('CIRCLE', 'CIR')\
                            .replace('NORTH', 'N').replace('SOUTH', 'S').replace('EAST', 'E').replace('WEST', 'W')\
                            .strip().upper().replace('  ', ' ').replace('.','').replace(',','').replace(' RD RD',' RD')

                rows.updateRow(row)
    cleanAddresses()
Exemplo n.º 28
0
def Fragility():

    status("STARTING FRAGILITY EXTRACTION")

    # subset collection lines to pipes only
    status("Subsetting collection system to pipes only")
    pipes = arcpy.MakeFeatureLayer_management(config.collection_lines, "pipes", "LAYER_GROUP in ( 'SEWER PIPES' , 'STORM PIPES' )")
    print str(arcpy.GetCount_management(pipes)) + " pipes"

    # save copy of pipes to output
    datestamp = datetime.datetime.today().strftime('%Y%m%d')
    outfile = "fragility_WB_" + datestamp
    full_outfile = os.path.join(config.resiliency_gdb, outfile)
    status("Copying pipes to output - called " + outfile)
    fragility_pipes = arcpy.CopyFeatures_management(pipes, full_outfile) # THIS IS A CITY-WIDE VERSION

    # MATERIAL VALUE PATCH
    # creates a lookup dictionary from the Nulls spreadsheet
    # use to fill the MATERIAL field for the records that match the key val Compkeys
    # use "if compkey = x and origval = y then set = to newval - this serves as a check that you're not overwriting valid values
    patch_dict = util.createMaterialPatch_dict(config.materialPatch_xls)

    # add all necessary fields
    util.addFields(fragility_pipes)

    # DATA PATCHES -----------------------------------------------------------------------------
    # fix for materials that are weird - only affects a few pipes
    status("Adjusting a few erroneous pipe values")
    with arcpy.da.UpdateCursor(fragility_pipes, ["COMPKEY","MATERIAL"]) as cursor:
        for row in cursor:
            if row[0] == 132037:
                row[1] = "PVC"
            elif row[0] == 490799:
                row[1] = "CIPP"
            cursor.updateRow(row)

    # patch backbone Null values using patch_dict
    status("Patching missing Materials in backbone segments")
    util.patch_Materials(fragility_pipes, patch_dict)


    # CONDITION AND EXTRACT DATA --------------------------------------------------------------------

    # get PGV value from raster
    # convert pipes to points
    status("Converting pipes to points")
    pipe_points = arcpy.FeatureToPoint_management(pipes,"in_memory\pipe_points")
    # extract raster values to points
    status("Extracting DOGAMI PGV raster values to points")
    arcpy.CheckOutExtension("Spatial")
    PGV_values = arcpy.sa.ExtractValuesToPoints(pipe_points, config.DOG_PGV, "in_memory\PGV_values", "NONE", "VALUE_ONLY")
    # assign value to fragility_pipes
    status("Assigning PGV values to fragility_pipes")
    CopyFieldFromFeature(PGV_values, "COMPKEY", "RASTERVALU", fragility_pipes, "COMPKEY", "PGV")

    # get other values from vectors
    status("Extracting Liq_Prob values") # this one is not aggregated as it is a text value
    targetFC = fragility_pipes
    targetField = "Liq_Prob"
    ID = "COMPKEY"
    overlapFC = config.PWB_Liq
    overlapField = "LiqExpl"
    result = arcpy.Intersect_analysis([targetFC,overlapFC],"in_memory\sect_result","NO_FID","","LINE")
    values={}
    with arcpy.da.SearchCursor(result,[ID,overlapField]) as cursor:
        for row in cursor:
            if row[0] != None:
                values[row[0]] = row[1]

    with arcpy.da.UpdateCursor(targetFC,[ID, targetField]) as cursor:
        for row in cursor:
            if row[0] in values:
                if values[row[0]] != None:
                    row[1] = values[row[0]]
            cursor.updateRow(row)

    # these are aggregated (MAX value taken)
    status("Extracting PGD_LS values")
    calcField_fromOverlap(fragility_pipes, "PGD_LS", "COMPKEY", config.PWB_LS, "LATERALSPREAD_80pct")
    status("Extracting PGD_Set values")
    calcField_fromOverlap(fragility_pipes, "PGD_Set", "COMPKEY", config.PWB_GS, "Ground_Settlement_80pct")
    status("Extracting PGD_Landslide values")
    calcField_fromOverlap(fragility_pipes, "PGD_Landslide", "COMPKEY", config.PWB_LD, "DEF_FEET_80pct")

    # convert PGD field values from feet to inches
    status("Converting PGD values from feet to inches")
    convertfields = ("PGD_LS", "PGD_Set", "PGD_Landslide")
    for field in convertfields:
        with arcpy.da.UpdateCursor(fragility_pipes, [field]) as cursor:
            for row in cursor:
                if row[0] is not None:
                    row[0] = row[0]*12
                cursor.updateRow(row)

    status("Re-setting lowest range Landslide values to 0")
    with arcpy.da.UpdateCursor(fragility_pipes, ["PGD_Landslide"]) as cursor:
            for row in cursor:
                if row[0] == config.PGD_Landslide_val:
                    row[0] = 0
                cursor.updateRow(row)

    # calculate aggregate PGD (LS + Set) - nothing can stop my pythagorean style
    status("Calculating PGD_Liq_Tot")
    with arcpy.da.UpdateCursor(fragility_pipes, ["PGD_Liq_Tot", "PGD_LS", "PGD_Set"]) as cursor:
            for row in cursor:
                if row[1] is not None and row[2] is not None:
                    row[0] = pow((pow(row[1],2) + pow(row[2],2)),0.5)
                elif row[1] is None and row[2] is not None:
                    row[0] = row[2]
                elif row[1] is not None and row[2] is None:
                    row[0] = row[1]
                cursor.updateRow(row)


    # calculate K values using materials and dictionaries
    util.calcValues(fragility_pipes)


    status("Updating Decision field")
    util.updateDecisionField(fragility_pipes, "PGD_Liq_Tot", "RR_Don_FIN", "PGD_Set")


    status("FRAGILITY EXTRACTION COMPLETE")
    print "Output saved to: " + full_outfile

    return fragility_pipes
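Note: several util helpers used above are defined elsewhere. A hedged sketch of calcField_fromOverlap, mirroring the inline Liq_Prob block but keeping the MAX overlapping value per ID, as the "aggregated (MAX value taken)" comment states; this is a sketch, not the original util code:

import arcpy

def calcField_fromOverlap(targetFC, targetField, ID, overlapFC, overlapField):
    # Intersect the target lines with the overlap polygons, then write the MAX
    # overlap value seen for each target ID back onto the target feature class
    result = arcpy.Intersect_analysis([targetFC, overlapFC],
                                      "in_memory\\sect_result", "NO_FID", "",
                                      "LINE")
    values = {}
    with arcpy.da.SearchCursor(result, [ID, overlapField]) as cursor:
        for row in cursor:
            if row[0] is not None and row[1] is not None:
                values[row[0]] = max(values.get(row[0], row[1]), row[1])
    with arcpy.da.UpdateCursor(targetFC, [ID, targetField]) as cursor:
        for row in cursor:
            if row[0] in values:
                row[1] = values[row[0]]
                cursor.updateRow(row)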
Exemplo n.º 29
0
# Process: Project
arcpy.Project_management(
    Tax_Parcels_shp, TaxParcel_PRJ_shp__2_,
    "PROJCS['NAD_1983_StatePlane_Washington_South_FIPS_4602_Feet',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Lambert_Conformal_Conic'],PARAMETER['False_Easting',1640416.666666667],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-120.5],PARAMETER['Standard_Parallel_1',45.83333333333334],PARAMETER['Standard_Parallel_2',47.33333333333334],PARAMETER['Latitude_Of_Origin',45.33333333333334],UNIT['Foot_US',0.3048006096012192]]",
    "'NAD_1983_HARN_To_WGS_1984_2 + WGS_1984_(ITRF00)_To_NAD_1983'",
    "PROJCS['NAD_1983_HARN_StatePlane_Washington_South_FIPS_4602_Feet',GEOGCS['GCS_North_American_1983_HARN',DATUM['D_North_American_1983_HARN',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Lambert_Conformal_Conic'],PARAMETER['False_Easting',1640416.666666667],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-120.5],PARAMETER['Standard_Parallel_1',45.83333333333334],PARAMETER['Standard_Parallel_2',47.33333333333334],PARAMETER['Latitude_Of_Origin',45.33333333333334],UNIT['Foot_US',0.3048006096012192]]",
    "PRESERVE_SHAPE", "")

# Process: Add Attribute Index
arcpy.AddIndex_management(TaxParcel_PRJ_shp__2_, "TaxParcelN", "",
                          "NON_UNIQUE", "NON_ASCENDING")

# Process: Make Feature Layer
arcpy.MakeFeatureLayer_management(
    TaxParcel_PRJ_shp, TaxParcel_PRJ_Layer, "", "",
    "FID FID VISIBLE NONE;Shape Shape VISIBLE NONE;OBJECTID OBJECTID VISIBLE NONE;TaxParcelN TaxParcelN VISIBLE NONE;TaxParcelT TaxParcelT VISIBLE NONE;TaxParcelL TaxParcelL VISIBLE NONE;TaxParce_1 TaxParce_1 VISIBLE NONE;TaxParcelU TaxParcelU VISIBLE NONE;Land_Acres Land_Acres VISIBLE NONE;Land_Value Land_Value VISIBLE NONE;Improvemen Improvemen VISIBLE NONE;Taxable_Va Taxable_Va VISIBLE NONE"
)

# Process: Make Table View
arcpy.MakeTableView_management(
    Taxpayer, Taxpayer_View, "", "",
    "OBJECTID OBJECTID VISIBLE NONE;PARCELNUMBER PARCELNUMBER VISIBLE NONE;TAXPAYERNAME TAXPAYERNAME VISIBLE NONE;CAREOF CAREOF VISIBLE NONE;TAXPAYERADDRESS TAXPAYERADDRESS VISIBLE NONE;TAXPAYERCITY TAXPAYERCITY VISIBLE NONE;TAXPAYERSTATE TAXPAYERSTATE VISIBLE NONE;TAXPAYERCOUNTRY TAXPAYERCOUNTRY VISIBLE NONE;TAXPAYERZIPCODE TAXPAYERZIPCODE VISIBLE NONE"
)

# Process: Add Join
arcpy.AddJoin_management(TaxParcel_PRJ_Layer, "TaxParcelN", Taxpayer_View,
                         "PARCELNUMBER", "KEEP_ALL")

# Process: Make Table View (2)
arcpy.MakeTableView_management(
    Tax_account, Tax_account_View, "", "",
Exemplo n.º 30
0
def locations_add_links(logger, the_scenario, modal_layer_name,
                        max_artificial_link_distance_miles):

    # ADD LINKS LOGIC
    # first we near the mode to the locations fc
    # then we iterate through the near table and build up a dictionary of links and all the near XYs on that link.
    # then we split the links on the mode (except pipeline) and preserve the data of that link.
    # then we near the locations to the nodes on the now split links.
    # we ignore locations with near dist == 0 on those nodes.
    # then we add the artificial link and note which locations got links.
    # then we set the connects_to field if the location was connected.

    logger.debug(
        "start: locations_add_links for mode: {}".format(modal_layer_name))

    scenario_gdb = the_scenario.main_gdb
    fp_to_modal_layer = os.path.join(scenario_gdb, "network", modal_layer_name)

    locations_fc = the_scenario.locations_fc
    arcpy.DeleteField_management(fp_to_modal_layer, "LOCATION_ID")
    arcpy.AddField_management(os.path.join(scenario_gdb, modal_layer_name),
                              "LOCATION_ID", "long")

    arcpy.DeleteField_management(fp_to_modal_layer, "LOCATION_ID_NAME")
    arcpy.AddField_management(os.path.join(scenario_gdb, modal_layer_name),
                              "LOCATION_ID_NAME", "text")

    if float(max_artificial_link_distance_miles.strip(" Miles")) < 0.0000001:
        logger.warning(
            "Note: ignoring mode {}. User specified artificial link distance of {}"
            .format(modal_layer_name, max_artificial_link_distance_miles))
        logger.debug(
            "Setting the definition query to Artificial = 999999, so we get an empty dataset for the "
            "make_feature_layer and subsequent near analysis")

        definition_query = "Artificial = 999999"  # matches nothing; returns an empty set
    else:
        definition_query = "Artificial = 0"  # the normal def query.

    if "pipeline" in modal_layer_name:

        # fp_to_modal_layer is already a full path; no need to re-join it under scenario_gdb
        if arcpy.Exists(fp_to_modal_layer + "_points"):
            arcpy.Delete_management(fp_to_modal_layer + "_points")

        # limit near to end points
        arcpy.FeatureVerticesToPoints_management(
            in_features=fp_to_modal_layer,
            out_feature_class=fp_to_modal_layer + "_points",
            point_location="BOTH_ENDS")
        logger.debug("start:  make_featurelayer_management")
        arcpy.MakeFeatureLayer_management(fp_to_modal_layer + "_points",
                                          "modal_lyr_" + modal_layer_name,
                                          definition_query)

    else:
        logger.debug("start:  make_featurelayer_management")
        arcpy.MakeFeatureLayer_management(fp_to_modal_layer,
                                          "modal_lyr_" + modal_layer_name,
                                          definition_query)

    logger.debug(
        "adding links between locations_fc and mode {} with max dist of {}".
        format(modal_layer_name, max_artificial_link_distance_miles))

    if arcpy.Exists(os.path.join(scenario_gdb, "tmp_near")):
        logger.debug("start:  delete tmp near")
        arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_near"))

    logger.debug("start:  generate_near")
    arcpy.GenerateNearTable_analysis(locations_fc,
                                     "modal_lyr_" + modal_layer_name,
                                     os.path.join(scenario_gdb, "tmp_near"),
                                     max_artificial_link_distance_miles,
                                     "LOCATION", "NO_ANGLE", "CLOSEST")

    edit = arcpy.da.Editor(os.path.join(scenario_gdb))
    edit.startEditing(False, False)
    edit.startOperation()

    id_fieldname = arcpy.Describe(os.path.join(scenario_gdb,
                                               modal_layer_name)).OIDFieldName

    seenids = {}

    # SPLIT LINKS LOGIC
    # 1) first search through the tmp_near fc and add points from the near on that link.
    # 2) next we query the mode layer and get the mode specific data using the near FID.
    # 3) then we split the old link, and use insert cursor to populate mode specific data into fc for the two new links.
    # 4) then we delete the old unsplit link
    logger.debug("start:  split links")
    with arcpy.da.SearchCursor(
            os.path.join(scenario_gdb, "tmp_near"),
        ["NEAR_FID", "NEAR_X", "NEAR_Y", "NEAR_DIST"]) as scursor:

        for row in scursor:

            # if the near distance is 0, then it's connected and we don't need
            # to split the line.
            if row[3] == 0:
                # only give a debug warning if not pipeline.
                if "pipeline" not in modal_layer_name:
                    logger.warning(
                        "Split links code: LOCATION MIGHT BE ON THE NETWORK. Ignoring NEAR_FID {} with NEAR_DIST {}"
                        .format(row[0], row[3]))

            if row[3] != 0:

                # STEP 1: point geoms where to split from the near XY
                # ---------------------------------------------------
                # get the line ID to split
                theIdToGet = str(row[0])  # this is the link id we need.

                if theIdToGet not in seenids:
                    seenids[theIdToGet] = []

                point = arcpy.Point()
                point.X = float(row[1])
                point.Y = float(row[2])
                point_geom = arcpy.PointGeometry(point,
                                                 ftot_supporting_gis.LCC_PROJ)
                seenids[theIdToGet].append(point_geom)
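        # seenids now maps each link OID (as a string) to every PointGeometry
        # at which that link must be split.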

        # STEP 2 -- get mode specific data from the link
        # ------------------------------------------------
        if 'pipeline' not in modal_layer_name:

            for theIdToGet in seenids:

                # initialize the variables so we don't get stale values (gremlins).
                in_line = None  # the shape geometry
                in_capacity = None  # road + rail
                in_volume = None  # road + rail
                in_vcr = None  # road + rail | volume to capacity ratio
                in_fclass = None  # road | fclass
                in_speed = None  # road | rounded speed
                in_stracnet = None  # rail
                in_density_code = None  # rail
                in_tot_up_dwn = None  # water

                if modal_layer_name == 'road':
                    for row in arcpy.da.SearchCursor(
                            os.path.join(scenario_gdb, modal_layer_name), [
                                "SHAPE@", "Capacity", "Volume", "VCR",
                                "FCLASS", "ROUNDED_SPEED"
                            ],
                            where_clause=id_fieldname + " = " + theIdToGet):
                        in_line = row[0]
                        in_capacity = row[1]
                        in_volume = row[2]
                        in_vcr = row[3]
                        in_fclass = row[4]
                        in_speed = row[5]

                if modal_layer_name == 'rail':
                    for row in arcpy.da.SearchCursor(
                            os.path.join(scenario_gdb, modal_layer_name), [
                                "SHAPE@", "Capacity", "Volume", "VCR",
                                "STRACNET", "DENSITY_CODE"
                            ],
                            where_clause=id_fieldname + " = " + theIdToGet):
                        in_line = row[0]
                        in_capacity = row[1]
                        in_volume = row[2]
                        in_vcr = row[3]
                        in_stracnet = row[4]
                        in_density_code = row[5]

                if modal_layer_name == 'water':
                    for row in arcpy.da.SearchCursor(
                            os.path.join(scenario_gdb, modal_layer_name),
                        ["SHAPE@", "Capacity", "Volume", "VCR", "TOT_UP_DWN"],
                            where_clause=id_fieldname + " = " + theIdToGet):
                        in_line = row[0]
                        in_capacity = row[1]
                        in_volume = row[2]
                        in_vcr = row[3]
                        in_tot_up_dwn = row[4]

                # STEP 3: Split and populate with mode specific data from old link
                # ------------------------------------------------------------------
                split_lines = arcpy.management.SplitLineAtPoint(
                    in_line, seenids[theIdToGet], arcpy.Geometry(), 1)
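                # With arcpy.Geometry() as the output, SplitLineAtPoint returns
                # the pieces as a list of in-memory geometries instead of
                # writing a feature class; 1 is the search radius in the data's
                # linear units (meters here).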

                if len(split_lines) != 1:

                    # ROAD
                    if modal_layer_name == 'road':

                        icursor = arcpy.da.InsertCursor(
                            os.path.join(scenario_gdb, modal_layer_name), [
                                'SHAPE@', 'Artificial', 'MODE_TYPE', 'MILES',
                                'FCLASS', 'ROUNDED_SPEED', 'Volume',
                                'Capacity', 'VCR'
                            ])

                        # Insert new links that include the mode-specific attributes
                        for new_line in split_lines:
                            len_in_miles = Q_(new_line.length,
                                              "meters").to("miles").magnitude
                            icursor.insertRow([
                                new_line, 0, modal_layer_name, len_in_miles,
                                in_fclass, in_speed, in_volume, in_capacity,
                                in_vcr
                            ])

                        # Delete cursor object
                        del icursor

                    elif modal_layer_name == 'rail':
                        icursor = arcpy.da.InsertCursor(
                            os.path.join(scenario_gdb, modal_layer_name), [
                                'SHAPE@', 'Artificial', 'MODE_TYPE', 'MILES',
                                'STRACNET', 'DENSITY_CODE', 'Volume',
                                'Capacity', 'VCR'
                            ])

                        # Insert new rows that include the mode-specific attributes
                        for new_line in split_lines:
                            len_in_miles = Q_(new_line.length,
                                              "meters").to("miles").magnitude
                            icursor.insertRow([
                                new_line, 0, modal_layer_name, len_in_miles,
                                in_stracnet, in_density_code, in_volume,
                                in_capacity, in_vcr
                            ])

                        # Delete cursor object
                        del icursor

                    elif modal_layer_name == 'water':

                        icursor = arcpy.da.InsertCursor(
                            os.path.join(scenario_gdb, modal_layer_name), [
                                'SHAPE@', 'Artificial', 'MODE_TYPE', 'MILES',
                                'TOT_UP_DWN', 'Volume', 'Capacity', 'VCR'
                            ])

                        # Insert new rows that include the mode-specific attributes
                        for new_line in split_lines:
                            len_in_miles = Q_(new_line.length,
                                              "meters").to("miles").magnitude
                            icursor.insertRow([
                                new_line, 0, modal_layer_name, len_in_miles,
                                in_tot_up_dwn, in_volume, in_capacity, in_vcr
                            ])

                        # Delete cursor object
                        del icursor

                    else:
                        logger.warning(
                            "Modal_layer_name: {} is not supported.".format(
                                modal_layer_name))

                    # STEP 4:  Delete old unsplit data
                    with arcpy.da.UpdateCursor(os.path.join(
                            scenario_gdb, modal_layer_name), ['OID@'],
                                               where_clause=id_fieldname +
                                               " = " + theIdToGet) as ucursor:
                        for row in ucursor:
                            ucursor.deleteRow()

                # if the split doesn't work.
                else:
                    logger.detailed_debug(
                        "the line split didn't work for ID: {}. "
                        "Might want to investigate. "
                        "Could just be an artifact from the near result being the end of a line."
                        .format(theIdToGet))

    edit.stopOperation()
    edit.stopEditing(True)

    # delete the old features
    # ------------------------
    logger.debug(
        "start:  delete old features (tmp_near, tmp_near_2, tmp_nodes)")
    if arcpy.Exists(os.path.join(scenario_gdb, "tmp_near")):
        arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_near"))

    if arcpy.Exists(os.path.join(scenario_gdb, "tmp_near_2")):
        arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_near_2"))

    if arcpy.Exists(os.path.join(scenario_gdb, "tmp_nodes")):
        arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_nodes"))

    # Add artificial links now.
    # now that the lines have been split add lines from the from points to the nearest node
    # --------------------------------------------------------------------------------------
    logger.debug(
        "start:  add artificial links now w/ definition_query: {}".format(
            definition_query))
    logger.debug("start:  make_featurelayer 2")
    fp_to_modal_layer = os.path.join(scenario_gdb, "network", modal_layer_name)
    arcpy.MakeFeatureLayer_management(fp_to_modal_layer,
                                      "modal_lyr_" + modal_layer_name + "2",
                                      definition_query)
    logger.debug("start:  feature vertices to points 2")
    arcpy.FeatureVerticesToPoints_management(
        in_features="modal_lyr_" + modal_layer_name + "2",
        out_feature_class=os.path.join(scenario_gdb, "tmp_nodes"),
        point_location="BOTH_ENDS")
    logger.debug("start:  generate near table 2")
    arcpy.GenerateNearTable_analysis(locations_fc,
                                     os.path.join(scenario_gdb, "tmp_nodes"),
                                     os.path.join(scenario_gdb, "tmp_near_2"),
                                     max_artificial_link_distance_miles,
                                     "LOCATION", "NO_ANGLE", "CLOSEST")

    logger.debug("start:  delete tmp_nodes")
    arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_nodes"))

    logger.debug("start:  start editor")
    edit = arcpy.da.Editor(os.path.join(scenario_gdb))
    edit.startEditing(False, False)
    edit.startOperation()

    icursor = arcpy.da.InsertCursor(
        os.path.join(scenario_gdb, modal_layer_name), [
            'SHAPE@', 'Artificial', 'MODE_TYPE', 'MILES', 'LOCATION_ID',
            'LOCATION_ID_NAME'
        ])  # add location_id for setting flow restrictions

    location_id_name_dict = get_location_id_name_dict(the_scenario, logger)
    connected_location_ids = []
    connected_location_id_names = []
    logger.debug("start:  search cursor on tmp_near_2")
    with arcpy.da.SearchCursor(
            os.path.join(scenario_gdb, "tmp_near_2"),
        ["FROM_X", "FROM_Y", "NEAR_X", "NEAR_Y", "NEAR_DIST", "IN_FID"
         ]) as scursor:

        for row in scursor:

            if row[4] != 0:

                # use the unique objectid (in_fid) from the near to determine
                # if we have an in or an out location.
                # then set the flow restrictions appropriately.

                in_fid = row[5]
                location_id_name = location_id_name_dict[in_fid]
                location_id = location_id_name.split("_")[0]
                connected_location_ids.append(location_id)
                connected_location_id_names.append(location_id_name)

                coordList = []
                coordList.append(arcpy.Point(row[0], row[1]))
                coordList.append(arcpy.Point(row[2], row[3]))
                polyline = arcpy.Polyline(arcpy.Array(coordList))
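                # the artificial link is a straight two-vertex line from the
                # location (FROM) to the nearest network node (NEAR).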

                len_in_miles = Q_(polyline.length,
                                  "meters").to("miles").magnitude

                # insert artificial link attributes
                icursor.insertRow([
                    polyline, 1, modal_layer_name, len_in_miles, location_id,
                    location_id_name
                ])

            else:
                logger.warning(
                    "Artificial Link code: Ignoring IN_FID {} with NEAR_DIST {}"
                    .format(row[5], row[4]))

    del icursor
    logger.debug("start:  stop editing")
    edit.stopOperation()
    edit.stopEditing(True)

    arcpy.Delete_management(os.path.join(scenario_gdb, "tmp_near_2"))

    # ALSO SET THE connects_<mode> FIELD IN THE LOCATIONS POINT LAYER
    # ---------------------------------------------------------------
    logger.debug("start:  connect_x")
    arcpy.AddField_management(os.path.join(scenario_gdb, locations_fc),
                              "connects_" + modal_layer_name, "SHORT")
    arcpy.CalculateField_management(os.path.join(scenario_gdb, locations_fc),
                                    "connects_" + modal_layer_name, 0,
                                    "PYTHON_9.3")

    edit = arcpy.da.Editor(scenario_gdb)
    edit.startEditing(False, False)
    edit.startOperation()
    with arcpy.da.UpdateCursor(
            os.path.join(scenario_gdb, locations_fc),
        ["LOCATION_ID_NAME", "connects_" + modal_layer_name]) as cursor:

        for row in cursor:

            if row[0] in connected_location_id_names:
                row[1] = 1
                cursor.updateRow(row)

    edit.stopOperation()
    edit.stopEditing(True)

    logger.debug("finish: locations_add_links")