Example No. 1
    row = [
        count, point_obj, project['projectid'], project['projectname'],
        project['webcategory'], project['projectmanager']
    ]
    edit_points.insertRow(row)
    #     print count,project['lat'],project['lon']
    count = count + 1

del edit_points
# win32api.MessageBox(0, arcpy.env.workspace, featureName)

#Spatially select the art80 parcel polygons that intersect the live-SQL art80 points
parcel_select = "art80_parcel_intersect_temp_" + str(int(time.time()))
arcpy.MakeFeatureLayer_management(parcel, parcel_select)
arcpy.SelectLayerByLocation_management(parcel_select, "INTERSECT", featureName)
parcel_out = "art80_parcel_intersect_" + str(int(time.time()))
arcpy.CopyFeatures_management(parcel_select, parcel_out)

parcel_joined = "Art80_Parcel_joined_" + str(int(time.time()))
arcpy.SpatialJoin_analysis(parcel_out, featureName, parcel_joined,
                           "JOIN_ONE_TO_MANY", "KEEP_ALL", "", "", "", "")

parcel_buffered = "Art80_Parcel_buffered_" + str(int(time.time()))
arcpy.Buffer_analysis(parcel_joined, parcel_buffered, "70 Feet", 'FULL',
                      'ROUND', 'NONE')

server = 'vsql22.cityhall.boston.cob'
db = 'PWD_cobucs'
user = '******'
password = '******'
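The connection variables above feed the "live sql" art80 point source mentioned in the comments. As a hedged illustration only (pyodbc, the table name, and the column list are assumptions and not part of the original script), the credentials could be consumed like this:

# Minimal sketch: pull the live art80 records over SQL with pyodbc (assumed library;
# hypothetical table and columns).
import pyodbc

conn = pyodbc.connect("DRIVER={SQL Server};SERVER=%s;DATABASE=%s;UID=%s;PWD=%s"
                      % (server, db, user, password))
sql_cursor = conn.cursor()
sql_cursor.execute("SELECT projectid, projectname, lat, lon FROM art80_projects")
for record in sql_cursor.fetchall():
    print record
conn.close()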
Example No. 2
def preprocessing(stream_linework):

    import arcpy

    # initialize the arcpy environment
    arcpy.env.workspace = os.getcwd()
    arcpy.env.overwriteOutput = True
    arcpy.env.qualifiedFieldNames = False
    arcpy.CheckOutExtension("spatial")  # Check spatial analyst license

    print "\nperforming spatial join of linework to grid... "

    # if a list of features is provided, merge them together; put in same place as first input file
    if not isinstance(stream_linework, basestring):
        print "merging: "
        for lw in stream_linework:
            print lw
        merged_linework = os.path.join(
            os.path.split(stream_linework[0])[0], 'input_linework.shp')
        arcpy.Merge_management(stream_linework, merged_linework)
        stream_linework = merged_linework

    arcpy.SpatialJoin_analysis(MFgrid, stream_linework, stream_cells,
                               "JOIN_ONE_TO_MANY", "KEEP_COMMON")

    print "\nDissolving river cells on cell number to isolate unique cells...\n"
    arcpy.Dissolve_management(stream_cells, stream_cells_dissolve,
                              MFgrid_node_attribute)

    print "Exploding new stream linework to grid cells using Intersect and Multipart to Singlepart..."
    arcpy.Intersect_analysis([stream_cells_dissolve, stream_linework],
                             "tmp_intersect.shp")
    arcpy.MultipartToSinglepart_management("tmp_intersect.shp",
                                           stream_fragments)

    # make a new feature layer from exploded shape file
    arcpy.MakeFeatureLayer_management(stream_fragments, 'stream_fragments')

    if not from_scratch:
        print "Removing linework that overlaps with existing SFR cells..."
        arcpy.MakeFeatureLayer_management(existing_sfr_shp,
                                          'existing_sfr_cells')
        # use "WITHIN"; "INTERSECT" was also deleting fragments that touched existing cells
        arcpy.SelectLayerByLocation_management('stream_fragments', "INTERSECT",
                                               'existing_sfr_cells', "",
                                               "NEW_SELECTION")
        arcpy.DeleteFeatures_management('stream_fragments')
        #arcpy.CopyFeatures_management('stream_fragments', stream_fragments) # save layer to shapefile

    print "Adding in stream geometry..."
    #set up list and dictionary for fields, types, and associated commands
    fields = ('X_start', 'Y_start', 'X_end', 'Y_end', 'LengthFt')
    types = {
        'X_start': 'DOUBLE',
        'Y_start': 'DOUBLE',
        'X_end': 'DOUBLE',
        'Y_end': 'DOUBLE',
        'LengthFt': 'DOUBLE'
    }
    commands = {
        'X_start': "float(!SHAPE.firstpoint!.split()[0])",
        'Y_start': "float(!SHAPE.firstpoint!.split()[1])",
        'X_end': "float(!SHAPE.lastpoint!.split()[0])",
        'Y_end': "float(!SHAPE.lastpoint!.split()[1])",
        'LengthFt': "float(!SHAPE.length!)"
    }

    #add fields for start, end, and length
    for fld in fields:
        arcpy.AddField_management('stream_fragments', fld, types[fld])

    #calculate the fields
    for fld in fields:
        print "\tcalculating %s(s)..." % (fld)
        arcpy.CalculateField_management('stream_fragments', fld, commands[fld],
                                        "PYTHON")

    ofp.write('\n' + 25 * '#' +
              '\nRemoving reaches with lengths less than or equal to %s...\n' %
              reach_cutoff)
    print "\nRemoving reaches with lengths less than or equal to %s..." % reach_cutoff
    table = arcpy.UpdateCursor('stream_fragments')
    count = 0
    for reaches in table:
        if reaches.getValue('LengthFt') <= reach_cutoff:
            print "cellnum: %d" % (reaches.getValue(MFgrid_node_attribute)),
            ofp.write("cellnum: %d" %
                      (reaches.getValue(MFgrid_node_attribute)))
            table.deleteRow(reaches)
            count += 1
    print "\nremoved %s reaches with lengths <= %s\n" % (count, reach_cutoff)
    ofp.write("removed %s reaches with lengths <= %s\n" %
              (count, reach_cutoff))

    # create a unique ID for each new stream fragment using FID and number of existing reaches
    arcpy.AddField_management('stream_fragments', "FragID", "LONG")
    arcpy.CalculateField_management('stream_fragments', "FragID",
                                    "!FID! + {0:d}".format(nreaches), "PYTHON")
Example No. 3
    #if wrtLineFC was provided, check for only one line
    if plotWRT == 'true':
        if wrtLineFC != '':
            #if the 'Single line layer' parameter is not empty, first check that the layer contains lines
            desFC = arcpy.Describe(wrtLineFC)
            if desFC.shapeType != 'Polyline':
                arcpy.AddError("Select a line layer for the 'Single line layer' parameter.")
                raise SystemError
            #and then check that there is only one feature in the layer or in the selection
            result = int(arcpy.GetCount_management(wrtLineFC).getOutput(0))
            if result > 1:
                arcpy.AddError("'Single line layer' has more than one line in it or more than one line selected.")
                raise SystemError
            #now select only those profile lines that intersect the WRT line
            arcpy.SelectLayerByLocation_management(linesLayer, "INTERSECT", wrtLineFC)
        else:
            arcpy.AddError("'Single line layer' parameter is empty.")
            raise SystemError
    
    #environment variables
    arcpy.env.overwriteOutput = True
    scratchDir = arcpy.env.scratchWorkspace
    arcpy.env.workspace = scratchDir

    #add an rkey field to the table that consists of values from the OID
    desc = arcpy.Describe(linesLayer)
    idField = desc.OIDFieldName
    addAndCalc(linesLayer, 'ORIG_FID', '[' + idField + ']')
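    # addAndCalc() is not defined in this excerpt; a minimal sketch of what such a
    # helper typically does, assuming a LONG field and a VB-style expression (as the
    # bracketed '[OID]' syntax in the call above suggests):
    ##    def addAndCalc(layer, field, calc):
    ##        if field not in [f.name for f in arcpy.ListFields(layer)]:
    ##            arcpy.AddField_management(layer, field, 'LONG')
    ##        arcpy.CalculateField_management(layer, field, calc, 'VB')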
    
    #interpolate the lines
Example No. 4
# get the data frame
df = arcpy.mapping.ListDataFrames(mxd, "*")[0]
print "5"

# create a new layer
citrus_lyr = arcpy.mapping.Layer(citrus)
grid = arcpy.mapping.Layer(grid)
print "6"

# add citrus layer to the map
arcpy.mapping.AddLayer(df, citrus_lyr, "AUTO_ARRANGE")
print "7"

# select the grids that intersect with citrus polygons
arcpy.MakeFeatureLayer_management(grid, 'grid_sel')
arcpy.SelectLayerByLocation_management('grid_sel', 'intersect', citrus)
print "8"

# save selected grids into new layer
arcpy.CopyFeatures_management('grid_sel', 'grid_sel_layer')
print "9"

# export the selected grids to an excel file
arcpy.TableToExcel_conversion(path + r"\grid_sel_layer.shp",
                              path + r"\grids.xls")
print "10"

#import the excel file back to python
workbook = xlrd.open_workbook(path + r"\grids.xls")
sheet = workbook.sheet_by_index(0)
print "11"
Example No. 5
    def Eliminate(inFeatures, outFeatureClass, expression, ciclo):
        nombre = "M" + str(random.randrange(0, 5000))
        templfeatures = "blocklayer2" + "_" + str(random.randrange(0, 5000))
        print inFeatures
        arcpy.MakeFeatureLayer_management(inFeatures, templfeatures)
        if ciclo == 1:
            nombre = "M" + str(random.randrange(0, 5000))
            arcpy.MakeFeatureLayer_management(grilla, templGrilla)
            arcpy.MakeFeatureLayer_management(inFeatures, templfeatures)
            path = r"in_memory\%s" % nombre
            print "layer temporal1"
            fc_grilla = arcpy.SelectLayerByAttribute_management(
                templGrilla, "NEW_SELECTION", "PageNumber  = %s" % str(numero))
            print "selecionando grilla"
            fc_select = arcpy.SelectLayerByLocation_management(
                templfeatures, "have_their_center_in", templGrilla)
            print "seleccionando por centroides"
            arcpy.CopyFeatures_management(templfeatures, path)

            arcpy.MakeFeatureLayer_management(path, path + ".lyr")
            arcpy.AddField_management(in_table=path + ".lyr",
                                      field_name="Area_ha",
                                      field_type="DOUBLE")
            arcpy.CalculateField_management(in_table=path + ".lyr",
                                            field="Area_ha",
                                            expression="!SHAPE.area!",
                                            expression_type="PYTHON")
            print "layer temporal2"
            print "layer seleccione"
            fc_filtro = arcpy.SelectLayerByAttribute_management(
                path + ".lyr", "NEW_SELECTION", expression)
            print "corriendo eliminate primer ciclo"
            arcpy.Eliminate_management(path + ".lyr", outFeatureClass,
                                       "LENGTH", "")

        if ciclo == 0:
            arcpy.MakeFeatureLayer_management(templfeatures,
                                              templfeatures + ".lyr")
            arcpy.AddField_management(in_table=templfeatures + ".lyr",
                                      field_name="Area_ha",
                                      field_type="DOUBLE")
            arcpy.CalculateField_management(in_table=templfeatures + ".lyr",
                                            field="Area_ha",
                                            expression="!SHAPE.area!",
                                            expression_type="PYTHON")
            print "seleccionando ciclos alternos"
            fc_filtro = arcpy.SelectLayerByAttribute_management(
                templfeatures, "NEW_SELECTION", expression)
            print "corriendo eliminate ciclos alternos"
            arcpy.Eliminate_management(templfeatures, outFeatureClass,
                                       "LENGTH", "")

        if ciclo == 2:
            arcpy.MakeFeatureLayer_management(templfeatures,
                                              templfeatures + ".lyr")
            arcpy.CalculateField_management(in_table=templfeatures + ".lyr",
                                            field="Area_ha",
                                            expression="!SHAPE.area!",
                                            expression_type="PYTHON")
            print "seleccionando ciclos alternos"
            fc_filtro = arcpy.SelectLayerByAttribute_management(
                templfeatures, "NEW_SELECTION", expression)
            print "corriendo eliminate ciclos alternos"
            arcpy.Eliminate_management(templfeatures, outFeatureClass,
                                       "LENGTH", "")
Example No. 6
print "Start time: " + str(datetime.datetime.now())
##  for each huc
for huc in hucIDs:
    try:
        print huc, str(hucIDs.index(huc) + 1) + " / " + str(len(hucIDs))
        ##  clear any existing mask or extent
        arcpy.env.mask = ""
        arcpy.env.extent = ""

        ##  select the huc with the current id
        arcpy.SelectLayerByAttribute_management("hucLayer", "NEW_SELECTION",
                                                "\"HUC_6\" = '" + huc + "'")

        ##  select roads by intersection with huc polygon and 575 meter distance
        arcpy.SelectLayerByLocation_management("rdsLayer", "INTERSECT",
                                               "hucLayer", "575 Meters")

        ##  subset the selected roads to include only those categorized as paved, but excluding those with MTFCC code S1400 that do not have a name
        arcpy.SelectLayerByAttribute_management(
            "rdsLayer", "SUBSET_SELECTION",
            '("MTFCC" = \'S1100\' OR "MTFCC" = \'S1200\' OR "MTFCC" = \'S1630\' OR "MTFCC" = \'S1730\') or ("MTFCC" = \'S1400\' AND "FULLNAME" <> \'\')'
        )

        ##  remove problematic roads from a few hucs
        if huc == "031200":
            ##  remove some roads, easily identified from attributes as forest roads
            arcpy.SelectLayerByAttribute_management(
                "rdsLayer", "REMOVE_FROM_SELECTION",
                '"FULLNAME" LIKE \'Forest Rd%\' OR "FULLNAME" LIKE \'Nfr%\' OR "FULLNAME" LIKE \'National%\''
            )
Example No. 7
TR_TrkRte = arcpy.GetParameterAsText(1)
if TR_TrkRte == '#' or not TR_TrkRte:
    TR_TrkRte = "\\\\watis\\public\\InternsSpring2017\\Spring2017_WT_GeoDb.mdb\\Transportation\\TR_TrkRte"  # provide a default value if unspecified

# Local variables:
WT_Pipe_main_surf_test__12_ = WT_Pipe_main_surf
WT_Pipe_main_surf_Layer2 = "WT_Pipe_main_surf_Layer2"
WT_Pipe_main_surf_Layer2__2_ = WT_Pipe_main_surf_Layer2
WT_Pipe_main_surf_test__5_ = WT_Pipe_main_surf_Layer2__2_

# Process: Calculate Field
arcpy.CalculateField_management(WT_Pipe_main_surf, "Risk_TruckRoute", "1",
                                "PYTHON", "")

# Process: Make Feature Layer
arcpy.MakeFeatureLayer_management(
    WT_Pipe_main_surf_test__12_, WT_Pipe_main_surf_Layer2, "", "",
    "OBJECTID OBJECTID VISIBLE NONE;GID GID VISIBLE NONE;LAYER LAYER VISIBLE NONE;PIPEID PIPEID VISIBLE NONE;MATERIAL MATERIAL VISIBLE NONE;MaterialDesc MaterialDesc VISIBLE NONE;DIAMETER DIAMETER VISIBLE NONE;MEASUREDLE MEASUREDLE VISIBLE NONE;ACTUALLENG ACTUALLENG VISIBLE NONE;INSTALLATI INSTALLATI VISIBLE NONE;DRAWINGNO DRAWINGNO VISIBLE NONE;SOURCE SOURCE VISIBLE NONE;PressureZo PressureZo VISIBLE NONE;RiskCondition RiskCondition VISIBLE NONE;RiskFactor RiskFactor VISIBLE NONE;RiskIndex RiskIndex VISIBLE NONE;Shape Shape VISIBLE NONE;Z_Min Z_Min VISIBLE NONE;Z_Max Z_Max VISIBLE NONE;Z_Mean Z_Mean VISIBLE NONE;SLength SLength VISIBLE NONE;Min_Slope Min_Slope VISIBLE NONE;Max_Slope Max_Slope VISIBLE NONE;Avg_Slope Avg_Slope VISIBLE NONE;Shape_Length Shape_Length VISIBLE NONE;Risk_Pipe Risk_Pipe VISIBLE NONE;Risk_TruckRoute Risk_TruckRoute VISIBLE NONE;Risk_School Risk_School VISIBLE NONE;Risk_Business Risk_Business VISIBLE NONE;Risk_Facility Risk_Facility VISIBLE NONE;Risk_Park Risk_Park VISIBLE NONE;Risk_PumpStation Risk_PumpStation VISIBLE NONE;Risk_Creek Risk_Creek VISIBLE NONE;Risk_Diameter Risk_Diameter VISIBLE NONE;Likelihood Likelihood VISIBLE NONE;Consequence Consequence VISIBLE NONE;Risk_Index Risk_Index VISIBLE NONE"
)

# Process: Select Layer By Location
arcpy.SelectLayerByLocation_management(WT_Pipe_main_surf_Layer2,
                                       "WITHIN_A_DISTANCE", TR_TrkRte,
                                       "75 Feet", "NEW_SELECTION",
                                       "NOT_INVERT")

# Process: Calculate Field (2)
arcpy.CalculateField_management(WT_Pipe_main_surf_Layer2__2_,
                                "Risk_TruckRoute", "5", "PYTHON", "")
Example No. 8
def assign_pickup_day(subdivs, coll_grid):
    correct_subnames(subdivs)

    arcpy.MakeFeatureLayer_management(subdivs, "sub_lyr")
    arcpy.SelectLayerByLocation_management("sub_lyr",
                                           "INTERSECT",
                                           coll_grid,
                                           selection_type='NEW_SELECTION')
    arcpy.SelectLayerByLocation_management("sub_lyr",
                                           "INTERSECT",
                                           coll_grid,
                                           selection_type='SWITCH_SELECTION')
    arcpy.DeleteFeatures_management("sub_lyr")

    arcpy.SelectLayerByAttribute_management("sub_lyr", "CLEAR_SELECTION")

    arcpy.SelectLayerByLocation_management("sub_lyr",
                                           "INTERSECT",
                                           coll_grid,
                                           selection_type='NEW_SELECTION')

    sub_names_list = [
        row[0] for row in arcpy.da.SearchCursor("sub_lyr", ['Class'])
    ]

    try:
        arcpy.AddField_management(subdivs, "Trash_and_Recycling_Day", "TEXT")
        arcpy.AddField_management(subdivs, "YardWasteDay", "TEXT")
        arcpy.AddField_management(coll_grid, "Trash_and_Recycling_Day", "TEXT")
        arcpy.AddField_management(subdivs, "Current", "TEXT")
        arcpy.MakeFeatureLayer_management(subdivs, "SubDivs")
        arcpy.MakeFeatureLayer_management(coll_grid, "RecGrid")

    except Exception as e:
        tb = traceback.format_exc()
        email(tb)
        sys.exit()

    try:
        with arcpy.da.UpdateCursor(
                coll_grid, ['Recycling', "Trash_and_Recycling_Day"]) as ucur:
            for row in ucur:
                if row[0] in days_dict.keys():
                    row[1] = days_dict[row[0]]
                    ucur.updateRow(row)
    except Exception as e:
        tb = traceback.format_exc()
        email(tb)
        sys.exit()

    #print sub_names_list
    for sub_name in sorted(sub_names_list):
        #print str(sub_name), type(sub_name)
        try:
            day_dict = defaultdict(int)
            arcpy.SelectLayerByAttribute_management(
                "SubDivs", "NEW_SELECTION",
                "\"Class\" = '{}'".format(sub_name))
            #print [row[0] for row in arcpy.da.SearchCursor("SubDivs", ['Class'])]

            arcpy.SelectLayerByLocation_management(
                "RecGrid",
                "INTERSECT",
                "SubDivs",
                selection_type='NEW_SELECTION')
            count = int(arcpy.GetCount_management("RecGrid").getOutput(0))
        except Exception as e:
            tb = traceback.format_exc()
            email(tb)
            sys.exit()

        print sub_name, "-----", [
            row[0] for row in arcpy.da.SearchCursor("RecGrid", ['Street'])
        ], count

        print [f.name for f in arcpy.ListFields("RecGrid")]

        try:
            with arcpy.da.SearchCursor("RecGrid",
                                       ["Trash_and_Recycling_Day"]) as scur:
                for row in scur:

                    day_dict[row[0]] += 1

            for k, v in day_dict.items():

                print k, v

        except Exception as e:
            tb = traceback.format_exc()
            email(tb)
            sys.exit()

        v = list(day_dict.values())
        k = list(day_dict.keys())
        print k
        print v
        try:

            major_day = k[v.index(max(v))]
            #print "MAJOR DAYYYYYYYYYY", major_day, type(major_day)

            assign_dict[str(sub_name)] = str(major_day)

        except Exception as e:
            tb = traceback.format_exc()
            email(tb)
            sys.exit()

    log.info("ASSIGNING COLLECTION DAYS")
    for k, v in assign_dict.items():
        #print type(k), k, type(v), v
        log.info("\t Subdivison Collection Day Assigned values {}--{}--{}--{}".
                 format(k, type(k), v, type(v)))

    return assign_dict
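A hypothetical follow-up (the geodatabase paths are placeholders) showing how the returned dictionary might be written back onto the subdivision features:

assignments = assign_pickup_day(r'C:\data\waste.gdb\Subdivisions',
                                r'C:\data\waste.gdb\CollectionGrid')
with arcpy.da.UpdateCursor(r'C:\data\waste.gdb\Subdivisions',
                           ['Class', 'Trash_and_Recycling_Day']) as ucur:
    for row in ucur:
        if row[0] in assignments:
            row[1] = assignments[row[0]]
            ucur.updateRow(row)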
Example No. 9
def selectLayer(lyr, ws):
    cantCli = int(arcpy.GetCount_management(lyr).getOutput(0))
    container = []
    reg = 2
    for i in range(cantCli)[:200000]:
        try:
            cli = arcpy.MakeFeatureLayer_management(CLIENTE, "cli",
                                                    "OBJECTID = %s" % i)
            acont = arcpy.SelectLayerByLocation_management(
                "ACONT_mfl", 'INTERSECT', "cli", '#', 'NEW_SELECTION',
                'NOT_INVERT'
            )  # Select the area-of-influence layer for each client
            ainfl = arcpy.SelectLayerByLocation_management(
                "AINFL_mfl", 'INTERSECT', "cli", '#', 'NEW_SELECTION',
                'NOT_INVERT'
            )  # Select the area-of-influence layer for each client
            napfl = arcpy.SelectLayerByLocation_management(
                "NAP_mfl", 'INTERSECT', acont, '#', 'NEW_SELECTION',
                'NOT_INVERT')  # Select NAP layers
            tapfl = arcpy.SelectLayerByLocation_management(
                "TAP_mfl", 'INTERSECT', ainfl, '#', 'NEW_SELECTION',
                'NOT_INVERT')  # Select TAP layers

            with arcpy.da.SearchCursor(cli,
                                       ["ID", "SHAPE@X", "SHAPE@Y"]) as cursor:
                for x in cursor:
                    ws["A{}".format(reg)] = x[0]
                    ws["B{}".format(reg)] = x[1]
                    ws["C{}".format(reg)] = x[2]
                    ws["M{}".format(reg)] = "NO"

            listaNAP = []
            with arcpy.da.SearchCursor(napfl, [
                    codNAP, "SHAPE@X", "SHAPE@Y", "mn_estado_nap",
                    "mn_capacidad_nap", "mn_cnt_hilos_libres", "mn_tipo_nap",
                    "mn_sector_tdp", "mn_numcoo_x", "mn_numcoo_y"
            ]) as cursor:
                for m in cursor:
                    cli_fd = [[x[0], x[1], x[2]]
                              for x in arcpy.da.SearchCursor(
                                  cli, ["ID", "SHAPE@X", "SHAPE@Y"])]
                    cli_x = cli_fd[0][1]
                    cli_y = cli_fd[0][2]
                    nap_x = m[1]
                    nap_y = m[2]
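                    # Assumption: coordinates are decimal degrees; multiplying the
                    # planar degree distance by ~111,110 m per degree gives a rough
                    # distance in metres.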
                    dist = round(
                        math.sqrt(
                            (pow(cli_x - nap_x, 2) + pow(cli_y - nap_y, 2))) *
                        111110, 1)
                    rowNAP = [
                        dist, m[0], m[3], m[4], m[5], m[6], m[7], m[8], m[9],
                        "SI"
                    ]
                    listaNAP.append(rowNAP)
            listaNAP.sort(key=lambda x: x[0])
            if len(listaNAP) != 0:
                ws["D{}".format(reg)] = listaNAP[0][0]
                ws["E{}".format(reg)] = listaNAP[0][1]
                ws["F{}".format(reg)] = listaNAP[0][2]
                ws["G{}".format(reg)] = listaNAP[0][3]
                ws["H{}".format(reg)] = listaNAP[0][4]
                ws["I{}".format(reg)] = listaNAP[0][5]
                ws["J{}".format(reg)] = listaNAP[0][6]
                ws["K{}".format(reg)] = listaNAP[0][7]
                ws["L{}".format(reg)] = listaNAP[0][8]
                ws["M{}".format(reg)] = listaNAP[0][9]

            listaTAP = []
            with arcpy.da.SearchCursor(tapfl, [
                    codTAP, "SHAPE@X", "SHAPE@Y", "MTCODNOD", "MTTIPTRO",
                    "MTTRONCAL", "COD_TAP", "MTNUMBOR", "MTCNTBORLBR",
                    "MTTIPO", "NUMCOO_X", "NUMCOO_Y"
            ]) as cursor:
                for n in cursor:
                    cli_fd = [[x[0], x[1], x[2]]
                              for x in arcpy.da.SearchCursor(
                                  cli, ["ID", "SHAPE@X", "SHAPE@Y"])]
                    cli_x = cli_fd[0][1]
                    cli_y = cli_fd[0][2]
                    tap_x = n[1]
                    tap_y = n[2]
                    dist = round(
                        math.sqrt(
                            (pow(cli_x - tap_x, 2) + pow(cli_y - tap_y, 2))) *
                        111110, 1)
                    rowTAP = [
                        n[3], n[4], n[5], [n[3] + n[4] + n[5]][0], "SI", n[6],
                        n[7], n[8], n[9], dist, n[10], n[11]
                    ]
                    listaTAP.append(rowTAP)

            listaTAP.sort(key=lambda x: x[9])  # reverse=True
            if len(listaTAP) > 0:
                ws["N{}".format(reg)] = listaTAP[0][0]
                ws["O{}".format(reg)] = listaTAP[0][1]
                ws["P{}".format(reg)] = listaTAP[0][2]
                ws["Q{}".format(reg)] = listaTAP[0][3]
                ws["R{}".format(reg)] = listaTAP[0][4]
                ws["S{}".format(reg)] = listaTAP[0][5]
                ws["T{}".format(reg)] = listaTAP[0][6]
                ws["U{}".format(reg)] = listaTAP[0][7]
                ws["V{}".format(reg)] = listaTAP[0][8]
                ws["W{}".format(reg)] = listaTAP[0][9]
                ws["X{}".format(reg)] = listaTAP[0][10]
                ws["Y{}".format(reg)] = listaTAP[0][11]
            if len(listaTAP) > 1:
                ws["Z{}".format(reg)] = listaTAP[1][4]
                ws["AA{}".format(reg)] = listaTAP[1][5]
                ws["AB{}".format(reg)] = listaTAP[1][6]
                ws["AC{}".format(reg)] = listaTAP[1][7]
                ws["AD{}".format(reg)] = listaTAP[1][8]
                ws["AE{}".format(reg)] = listaTAP[1][9]
                ws["AF{}".format(reg)] = listaTAP[1][10]
                ws["AG{}".format(reg)] = listaTAP[1][11]
            if len(listaTAP) > 2:
                ws["AH{}".format(reg)] = listaTAP[2][4]
                ws["AI{}".format(reg)] = listaTAP[2][5]
                ws["AJ{}".format(reg)] = listaTAP[2][6]
                ws["AK{}".format(reg)] = listaTAP[2][7]
                ws["AL{}".format(reg)] = listaTAP[2][8]
                ws["AM{}".format(reg)] = listaTAP[2][9]
                ws["AN{}".format(reg)] = listaTAP[2][10]
                ws["AO{}".format(reg)] = listaTAP[2][11]
            reg = reg + 1
            print reg

            arcpy.SelectLayerByAttribute_management(
                napfl, "CLEAR_SELECTION")  #Limpiar seleccion
            arcpy.SelectLayerByAttribute_management(
                tapfl, "CLEAR_SELECTION")  #Limpiar seleccion
            arcpy.SelectLayerByAttribute_management(
                ainfl, "CLEAR_SELECTION")  #Limpiar seleccion
        except:
            print traceback.format_exc()
Example No. 10
env.workspace = r"in_memory"
AddFieldIfNotexists(v_points_dissolve, "Unique_in_1km", "Short")
l_points_intersect = arcpy.MakeFeatureLayer_management(v_points_dissolve, "temp_layer")
        
cursor = arcpy.da.UpdateCursor(v_points_dissolve, ["SHAPE@", "Type_corr", "Unique_in_1km"])
for row in cursor:
    geom = row[0]
    type = row[1]
   
    # Create buffer
    v_point_buffer = "temp_point_buffer"
    arcpy.Buffer_analysis(geom, v_point_buffer, "1000")
    
    # Select features with the same type which fall in buffer
    arcpy.SelectLayerByAttribute_management(l_points_intersect, "NEW_SELECTION", "Type_corr='{}'".format(type))
    arcpy.SelectLayerByLocation_management(l_points_intersect, "INTERSECT", v_point_buffer, "", "SUBSET_SELECTION")
    arcpy.SelectLayerByLocation_management(l_points_intersect, "ARE_IDENTICAL_TO", geom, "", "REMOVE_FROM_SELECTION")
    
    same_activity_count = int(arcpy.GetCount_management(l_points_intersect).getOutput(0)) 
    if same_activity_count == 0:
        row[2] = 1
    else:
        row[2] = 0
    cursor.updateRow(row)
    arcpy.Delete_management(v_point_buffer)

# 6. summarize by recreational area
t_points_summary =  "temp_points_summary"
arcpy.Statistics_analysis(v_points_dissolve, t_points_summary, [["JOIN_ID","COUNT"], ["Unique_in_1km", "SUM"]], "JOIN_ID")   

    
Example No. 11
    arcpy.gp.Con_sa(slr_extent, CONDITION_TRUE, slr_ext_con, "", "Value =1")
 
    # NUMBER OF HOUSES LOST AT MWHS ###########################################  
    # Process: Region Group - assigns a number to each connected region for cells (i.e. with value 1 from Con)
    # As per NOAA inundation mapping guidance (NOAA, 2017)
    arcpy.gp.RegionGroup_sa(slr_ext_con, slrgroup, "EIGHT", "WITHIN", "NO_LINK", "")
    # Process: Raster to Polygon
    arcpy.RasterToPolygon_conversion(slrgroup, slr_poly, "SIMPLIFY", "Value")
    # Process: Select - Max polygon
    arcpy.Select_analysis(slr_poly, max_slr_poly, "Shape_Area=(SELECT MAX(Shape_Area) FROM slr_poly)")
    # Process: Select - Private residential dwellings as building type
    arcpy.Select_analysis(T2DLanduse_Buildings, private_houses, "TYPE = 'Residential'")    
      
    # Process: Select Layer By Location
    arcpy.MakeFeatureLayer_management(T2DLanduse_Buildings,"Buildings")
    building_count_result = arcpy.GetCount_management(arcpy.SelectLayerByLocation_management("Buildings", "INTERSECT", max_slr_poly, "", "NEW_SELECTION", "NOT_INVERT"))
    arcpy.MakeFeatureLayer_management(private_houses,"houses")
    house_count = arcpy.GetCount_management(arcpy.SelectLayerByLocation_management("houses", "INTERSECT", max_slr_poly, "", "NEW_SELECTION", "NOT_INVERT")) 
    slr_house = int(house_count.getOutput(0))
    # Print output from GetCount process
    print "The number of houses lost is " + str(slr_house) + " (of the " + str(int(building_count_result.getOutput(0))) + " buildings affected)"
    #Add results to separate list/column
    SLR_Number_House.append(slr_house)     
     
    # AVERAGE BEACH WIDTH (m) #################################################
    if SLRscn == 0:
        Transect1 = arcpy.da.TableToNumPyArray(arcpy.Erase_analysis(beachtrans1,slr_poly, beachwid1, ""),'SHAPE_Length')
        Transect_1_sum = Transect1["SHAPE_Length"].sum()
        Transect2 = arcpy.da.TableToNumPyArray(arcpy.Erase_analysis(beachtrans2,slr_poly, beachwid2, ""),'SHAPE_Length')
        Transect_2_sum = Transect2["SHAPE_Length"].sum()
        Transect3 = arcpy.da.TableToNumPyArray(arcpy.Erase_analysis(beachtrans3,slr_poly, beachwid3, ""),'SHAPE_Length')
Example No. 12
def DefinePopulation(study_area, out_points,
                     newsite_meth="NUMBER", newsite_val=10000, site_points=None):
    """Define population

    study_area    input study area polygons
    out_points    output points
    newsite_meth  new site generation parameter: "NUMBER","DISTANCE"
    newsite_val   value to use for above method
    site_points   existing site points
    """

    try:

        # initialize temp file variables
        lyrStudy, lyrSites, tmpFC, tmpRas, tmpPoints, numSites = [None] * 6
        rasWK = None

        fmtI = "  {0:<35s}{1:>8}"        # format to report integer/string values
        fmtF = "  {0:<35s}{1:>8.1f} {2}" # format to report float values w/ units

        lyrStudy = "lyr1"
        arcpy.MakeFeatureLayer_management(study_area, lyrStudy)

        # set processing environment
        arcpy.env.workspace = os.path.dirname(out_points)
        D = arcpy.Describe(study_area)
        env.extent = ext = D.extent
        env.outputCoordinateSystem = D.spatialReference
        xyUnits = D.spatialReference.linearUnitName
        arcpy.ClearEnvironment("snapRaster")
        rasWK = ScratchFolder()

        procLabel = "Defining population characteristics"
        GPMsg(procLabel)

        if not site_points:

            GPMsg("  Creating points...")

            # Prepare a population of inside study area

            if newsite_meth == "NUMBER":
                newsite_val = int(newsite_val)
                GPMsg(fmtI.format('Approximate number of sites:', newsite_val))
                samp_dist = ((ext.width * ext.height) / newsite_val) ** 0.5
            elif newsite_meth == "DISTANCE":
                samp_dist = float(newsite_val)
            else:
                raise Exception("Invalid new site method " + newsite_meth)

            GPMsg(fmtF.format(
                'Sample distance:', samp_dist, xyUnits.lower()[0]))

            # randomize the lattice origin
            xmin = ext.XMin - samp_dist * random.random()
            ymin = ext.YMin - samp_dist * random.random()
            env.extent = arcpy.Extent(xmin, ymin, ext.XMax, ext.YMax)

            # Report number sites
            n = int((env.extent.width * env.extent.height) /
                    (samp_dist ** 2))
            GPMsg(fmtI.format(
                "Building a population with", n) + " sites")

            # Create a raster covering the study area
            tmpRas = arcpy.CreateScratchName("saras", "", "raster", rasWK)
            arcpy.FeatureToRaster_conversion(lyrStudy, D.OIDFieldName,
                                             tmpRas, samp_dist)

            # check raster - are there data cells?
            try:
                arcpy.GetRasterProperties_management(tmpRas, "MINIMUM")
            except:
                GPMsg()
                raise MsgError("No points created")

            # Generate a point lattice from raster cell centroids
            tmpPoints = arcpy.CreateScratchName("pt", "",
                                                "featureclass", rasWK)
            arcpy.RasterToPoint_conversion(tmpRas, tmpPoints, "VALUE")
            lyrSites = "lyrSites"
            arcpy.MakeFeatureLayer_management(tmpPoints, lyrSites)
            arcpy.DeleteField_management(lyrSites, "GRID_CODE;GRIDCODE")

            # count points
            numSites = int(arcpy.GetCount_management(lyrSites).getOutput(0))
            GPMsg(fmtI.format("Points inside study area:", numSites))

        else:

            # Select points from an existing point feature class

            lyrSites = "lyrSites"
            arcpy.MakeFeatureLayer_management(site_points, lyrSites)
            numSites = int(arcpy.GetCount_management(lyrSites).getOutput(0))
            # select points within study area
            arcpy.SelectLayerByLocation_management(lyrSites, "WITHIN", lyrStudy)

            # check number of sites selected
            numSelected = int(arcpy.GetCount_management(lyrSites).getOutput(0))
            if not numSelected:
                raise MsgError("No points selected")
            nsel = "{0}/{1}".format(numSelected, numSites)
            GPMsg(fmtI.format("Points inside study area:", nsel))
            numSites = numSelected

        # copy points to output
        arcpy.CopyFeatures_management(lyrSites, out_points)

    except MsgError, xmsg:
        GPMsg("e", str(xmsg))
Example No. 13
cityIDStringField = "CI_FIPS"  # Name of column with city IDs
citiesWithTwoParkAndRides = 0  # Used for counting cities with at least two P & R facilities
numCities = 0  # Used for counting cities in total
# Make a feature layer of all the park and ride facilities
arcpy.MakeFeatureLayer_management(parkAndRide, "ParkAndRideLayer")
# Make an update cursor and loop through each city
with arcpy.da.UpdateCursor(cityBoundaries, (cityIDStringField, parkAndRideField)) as cityRows:
    for city in cityRows:
        # Create a query string for the current city
        cityIDString = city[0]
        queryString = '"' + cityIDStringField + '" = ' + "'" + cityIDString + "'"
        # Make a feature layer of just the current city polygon
        arcpy.MakeFeatureLayer_management(cityBoundaries, "CurrentCityLayer", queryString)
        try:
            # Narrow down the park and ride layer by selecting only the park and rides in the current city
            arcpy.SelectLayerByLocation_management("ParkAndRideLayer", "CONTAINED_BY", "CurrentCityLayer")
            # Count the number of park and ride facilities selected
            selectedParkAndRideCount = arcpy.GetCount_management("ParkAndRideLayer")
            numSelectedParkAndRide = int(selectedParkAndRideCount.getOutput(0))
            # If more than two park and ride facilities found, update the row to TRUE
            if numSelectedParkAndRide >= 2:
                city[1] = "TRUE"
                # Don't forget to call updateRow
                cityRows.updateRow(city)
                # Add 1 to your tally of cities with two park and rides
                citiesWithTwoParkAndRides += 1
        finally:
            # Delete current cities layer to prepare for next run of loop
            arcpy.Delete_management("CurrentCityLayer")
            numCities += 1
# Clean up park and ride feature layer
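arcpy.Delete_management("ParkAndRideLayer")  # assumed completion of the cleanup step named in the comment above
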
# Census Block Groups
if run_bg == "YES":
    bg_list = []
    with arcpy.da.SearchCursor(block_group_lyr, "GEOID") as cursor:
        for row in cursor:
            bg_list.append(row[0])
            del row
    del cursor

    bg_list.sort()
    for bg in bg_list:
        q1 = """ GEOID = """ + "\'" + str(bg) + "\'"
        arcpy.SelectLayerByAttribute_management(block_group_lyr,
                                                "NEW_SELECTION", q1)
        arcpy.SelectLayerByLocation_management(blocks_lyr,
                                               "WITHIN",
                                               block_group_lyr,
                                               selection_type="NEW_SELECTION")
        block_output = "BG_" + str(bg)
        arcpy.MakeFeatureLayer_management(blocks_lyr, block_output)

        bg_pop = []  # Population of each Block in Block Group

        dist_count = []  # Create empty list to hold block scores for distance
        acre_count = []  # Create empty list to hold block scores for acreage
        park_count = []  # Create empty list to hold block scores for parks

        fields = ["GEOID10", "POP"]
        with arcpy.da.SearchCursor(
                block_output,
                fields) as cursor:  # Search cursor in Census Block layer
            for row in cursor:
# Save the output raster as clipped by the neighborhood geometry
somaElev.save(somaOutput)
print 'extraction finished'


# Create an output file path for a new raster with feet elevation values
somaOutFeet = sfElevation.replace('sf_elevation','SOMA_feet')

# Convert the elevation values from meters to feet
outTimes = arcpy.sa.Times(somaOutput, 3.28084)
outTimes.save(somaOutFeet)
print 'conversion complete'

# Get the polygon geometry of the SOMA neighborhood
with arcpy.da.SearchCursor(sanFranciscoHoods,['SHAPE@'],sql) as cursor:
    for row in cursor:
        somaPoly = row[0]

# Make the Bus Stops into a feature layer
arcpy.MakeFeatureLayer_management(busStops, 'soma_stops')

# Select the bus stops that intersect with the SOMA neighborhood polygon
arcpy.SelectLayerByLocation_management("soma_stops", "INTERSECT", somaPoly)

# Save the bus stops with a new elevation value field.
outStops = r'C:\Projects\SanFrancisco.gdb\Chapter7Results\SoMaStops'
arcpy.sa.ExtractValuesToPoints("soma_stops", somaOutFeet,
                      outStops,"INTERPOLATE",
                      "VALUE_ONLY")
print 'points generated'
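An optional follow-up, not in the original: list each stop's sampled elevation. ExtractValuesToPoints writes the sampled raster value to a field named RASTERVALU.

with arcpy.da.SearchCursor(outStops, ['RASTERVALU']) as cursor:
    for row in cursor:
        print row[0]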
Example No. 16
        # print
        # print arcpy.GetMessages()

        # Write the selected features to a new feature class (NOT SURE THIS IS NEEDED, BUT GOOD FOR
        # MAKING SURE IT'S WORKING PROPERLY)
        arcpy.CopyFeatures_management(bearLayer, arcpy.env.workspace + "/" + bearLayer)
        # print
        # print arcpy.GetMessages()

        # Need to make parcel data into a layer for selection
        arcpy.MakeFeatureLayer_management(housesFC, housesLayer)

        # Select parcels that intersect with bear density tier
        # SelectLayerByLocation_management (in_layer, {overlap_type}, {select_features}, {search_distance},
        # {selection_type}, {invert_spatial_relationship})
        arcpy.SelectLayerByLocation_management(housesLayer, "INTERSECT", bearLayer, "",
                                           "NEW_SELECTION", "NOT_INVERT")
        # print
        # print arcpy.GetMessages()

        # Count number of parcels in each Tier
        result = arcpy.GetCount_management(housesLayer)
        print "count = " + str(int(result.getOutput(0)))
        #print "nn = " + str(nn)
        parcelCounts.append(int(result.getOutput(0)))
        #print "Number of parcels in tier {0} = {1}".format(nn,count)

        # print
        # print arcpy.GetMessages()

        # Save selected parcels to database
        arcpy.CopyFeatures_management(housesLayer, arcpy.env.workspace + "/" + housesLayer)
Example No. 17
def main(workspace, areaOfInterest, albertaloticRiparian,
         albertaMergedWetlandInventory, quarterSectionBoundaries,
         parksProtectedAreasAlberta, humanFootprint):

    # Import necessary modules
    import numpy as np
    import arcpy

    # Overwrite output and check out necessary extensions
    arcpy.env.overwriteOutput = True
    arcpy.CheckOutExtension("spatial")

    # assign workspace
    arcpy.env.workspace = workspace

    # First we project our parcel data into the correct projection, create a layer file, then select only parcels we are interested in with Select by Attribute
    # and Select by Location (Intersecting the Area of Interest polygon), then export this selection to a new feature class called "ParcelsFinal"

    # Local Variables
    quarterSectionBoundaries_project = "quarterSectionBoundaries_project"
    quarterSectionBoundaries_project_layer = "quarterSectionBoundaries_project_layer"
    ParcelsFinal = "ParcelsFinal"

    # Process: Project
    arcpy.Project_management(
        quarterSectionBoundaries, quarterSectionBoundaries_project,
        "PROJCS['NAD_1983_10TM_AEP_Forest',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-115.0],PARAMETER['Scale_Factor',0.9992],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]",
        "",
        "GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]",
        "NO_PRESERVE_SHAPE", "", "NO_VERTICAL")

    # Process: Make Feature Layer
    arcpy.MakeFeatureLayer_management(
        quarterSectionBoundaries_project,
        quarterSectionBoundaries_project_layer, "", "",
        "OBJECTID OBJECTID VISIBLE NONE;Shape Shape VISIBLE NONE;MER MER VISIBLE NONE;RGE RGE VISIBLE NONE;TWP TWP VISIBLE NONE;SEC SEC VISIBLE NONE;QS QS VISIBLE NONE;RA RA VISIBLE NONE;PARCEL_ID PARCEL_ID VISIBLE NONE;Shape_length Shape_length VISIBLE NONE;Shape_area Shape_area VISIBLE NONE"
    )

    # selects all parcels intersecting the users area of interest
    # Process: Select Layer By Location
    arcpy.SelectLayerByLocation_management(
        quarterSectionBoundaries_project_layer, "INTERSECT", areaOfInterest,
        "", "NEW_SELECTION", "NOT_INVERT")

    # Removes roads from parcel data to ensure that only quarter sections are selected
    # Process: Select Layer By Attribute
    arcpy.SelectLayerByAttribute_management(
        quarterSectionBoundaries_project_layer, "SUBSET_SELECTION",
        "RA NOT LIKE 'R'")

    # Process: Copy Features
    arcpy.CopyFeatures_management(quarterSectionBoundaries_project_layer,
                                  ParcelsFinal, "", "0", "0", "0")

    # ############### ArcGis MODEL BUILDER SECTION: for initial Geoproccessing #################################################################################################################

    # The following was exported from ArcMap's Model Builder. It performs most of the necessary geoprocessing needed to determine the spatial relationships
    # between the parcels and the user provided data (Human footprint, Lotic(Riparian), Wetlands, Patch Size, and Proximity)

    # local Variables:
    footprint_EXTENT_CLIPPED = "Footprint_Extent_Clipped"
    Footprint_Inverse = "Footprint_Inverse"
    Intact_Area_Per_Parcel = "Intact_Area_Per_Parcel"
    Wetland_Extent_Clipped = "Wetland_Extent_Clipped"
    Wetland_Lines = "Wetland_Lines"
    Wetland_Edge_Per_Parcel = "Wetland_Edge_Per_Parcel"
    Lotic_Extent_Clipped = "Lotic_Extent_Clipped"
    Lotic_No_Wetlands = "Lotic_No_Wetlands"
    Lotic_Area_Per_Parcel = "Lotic_Area_Per_Parcel"
    Area_Of_Interest_Buffered = "Area_Of_Interest_Buffered"
    Footprint_Larger_Extent = "Footprint_Larger_Extent"
    Footprint_INVERSE_Large = "Footprint_INVERSE_Large"
    Footprint_INVERSE_Large_Explode = "Footprint_INVERSE_Large_Explode"

    # Process: Clip
    arcpy.Clip_analysis(humanFootprint, ParcelsFinal, footprint_EXTENT_CLIPPED,
                        "")

    # Process: Erase
    arcpy.Erase_analysis(ParcelsFinal, footprint_EXTENT_CLIPPED,
                         Footprint_Inverse, "")

    #
    # Process: Tabulate Intersection
    arcpy.TabulateIntersection_analysis(ParcelsFinal, "OBJECTID",
                                        Footprint_Inverse,
                                        Intact_Area_Per_Parcel, "", "", "",
                                        "UNKNOWN")

    # Process: Clip (3)
    arcpy.Clip_analysis(albertaMergedWetlandInventory, ParcelsFinal,
                        Wetland_Extent_Clipped, "")

    # Process: Feature To Line
    arcpy.FeatureToLine_management(Wetland_Extent_Clipped, Wetland_Lines, "",
                                   "ATTRIBUTES")
    ##arcpy.FeatureToLine_management("'D:\\evanamiesgalonskiMOBILE\\1 Courses\\329\\Final Project\\DATA\\test results.gdb\\Wetland_Extent_Clipped'", Wetland_Lines, "", "ATTRIBUTES")

    # Process: Tabulate Intersection (2)
    arcpy.TabulateIntersection_analysis(ParcelsFinal, "OBJECTID",
                                        Wetland_Lines, Wetland_Edge_Per_Parcel,
                                        "", "", "", "UNKNOWN")

    # Process: Clip (4)
    arcpy.Clip_analysis(albertaloticRiparian, ParcelsFinal,
                        Lotic_Extent_Clipped, "")

    # Process: Erase (2)
    arcpy.Erase_analysis(Lotic_Extent_Clipped, Wetland_Extent_Clipped,
                         Lotic_No_Wetlands, "")

    # Process: Tabulate Intersection (3)
    arcpy.TabulateIntersection_analysis(ParcelsFinal, "OBJECTID",
                                        Lotic_No_Wetlands,
                                        Lotic_Area_Per_Parcel, "", "", "",
                                        "UNKNOWN")

    # Process: Buffer
    arcpy.Buffer_analysis(areaOfInterest, Area_Of_Interest_Buffered,
                          "50 Kilometers", "FULL", "ROUND", "NONE", "",
                          "PLANAR")

    # Process: Clip (2)
    arcpy.Clip_analysis(humanFootprint, Area_Of_Interest_Buffered,
                        Footprint_Larger_Extent, "")

    # Process: Erase (3)
    arcpy.Erase_analysis(Area_Of_Interest_Buffered, Footprint_Larger_Extent,
                         Footprint_INVERSE_Large, "")

    # Process: Multipart To Singlepart
    arcpy.MultipartToSinglepart_management(Footprint_INVERSE_Large,
                                           Footprint_INVERSE_Large_Explode)

    # ###########################################################################################################################################################################

    # This part of the script edits the newly created tables that contain information about the intersection of Wetlands, Lotic, and Intactness data with the land parcels
    # The Area and Percent coverage fields are renamed to be more descriptive and to ensure there are no confusing duplicate field names in our ParcelsFinal feature class.

    # Alter Field names in intactness table
    arcpy.AlterField_management(Intact_Area_Per_Parcel,
                                "AREA",
                                new_field_name="Area_Intact",
                                field_is_nullable="NULLABLE")
    arcpy.AlterField_management(Intact_Area_Per_Parcel,
                                "PERCENTAGE",
                                new_field_name="Percent_Intact",
                                field_is_nullable="NULLABLE")

    # Alter field names in lotic_table
    arcpy.AlterField_management(Lotic_Area_Per_Parcel,
                                "AREA",
                                new_field_name="Area_Lotic",
                                field_is_nullable="NULLABLE")
    arcpy.AlterField_management(Lotic_Area_Per_Parcel,
                                "PERCENTAGE",
                                new_field_name="Percent_Lotic",
                                field_is_nullable="NULLABLE")

    # Alter Field name in wetlands_table
    arcpy.AlterField_management(Wetland_Edge_Per_Parcel,
                                "LENGTH",
                                new_field_name="Wetland_Edge",
                                field_is_nullable="NULLABLE")

    # Now we will join the desired fields from the 3 tables (intactness, lotic, and wetlands) to the Land Parcel feature class

    # Process: Join Field
    arcpy.JoinField_management(ParcelsFinal, "OBJECTID",
                               Intact_Area_Per_Parcel, "OBJECTID_1",
                               ["Area_Intact", "Percent_Intact"])

    # Process: Join Field (2)
    arcpy.JoinField_management(ParcelsFinal, "OBJECTID", Lotic_Area_Per_Parcel,
                               "OBJECTID_1", ["Area_Lotic", "Percent_Lotic"])

    # Process: Join Field (3)
    arcpy.JoinField_management(ParcelsFinal, "OBJECTID",
                               Wetland_Edge_Per_Parcel, "OBJECTID_1",
                               "Wetland_Edge")

    # Now we get rid of null values in our new fields and replace them with zeros

    with arcpy.da.UpdateCursor(ParcelsFinal, ["Area_Intact"]) as cursor:
        for row in cursor:
            if row[0] == None:
                row[0] = 0
                cursor.updateRow(row)

    with arcpy.da.UpdateCursor(ParcelsFinal, ["Percent_Intact"]) as cursor:
        for row in cursor:
            if row[0] == None:
                row[0] = 0
                cursor.updateRow(row)

    with arcpy.da.UpdateCursor(ParcelsFinal, ["Area_Lotic"]) as cursor:
        for row in cursor:
            if row[0] == None:
                row[0] = 0
                cursor.updateRow(row)

    with arcpy.da.UpdateCursor(ParcelsFinal, ["Percent_Lotic"]) as cursor:
        for row in cursor:
            if row[0] == None:
                row[0] = 0
                cursor.updateRow(row)

    with arcpy.da.UpdateCursor(ParcelsFinal, ["Wetland_Edge"]) as cursor:
        for row in cursor:
            if row[0] == None:
                row[0] = 0
                cursor.updateRow(row)
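    # The five identical cursor blocks above could be collapsed into a single loop;
    # an equivalent sketch, commented out so the logic is not run twice:
    ##    for null_field in ["Area_Intact", "Percent_Intact", "Area_Lotic",
    ##                       "Percent_Lotic", "Wetland_Edge"]:
    ##        with arcpy.da.UpdateCursor(ParcelsFinal, [null_field]) as cursor:
    ##            for row in cursor:
    ##                if row[0] is None:
    ##                    row[0] = 0
    ##                    cursor.updateRow(row)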

    # This section of the script calculates the largest intact patch that intersects each parcel

    # Local Variables
    Footprint_INVERSE_Large_Explode = "Footprint_INVERSE_Large_Explode"
    Patch_Sizes_Per_Parcel = "Patch_Sizes_Per_Parcel"

    # Process: Tabulate Intersection
    arcpy.TabulateIntersection_analysis(ParcelsFinal, "OBJECTID",
                                        Footprint_INVERSE_Large_Explode,
                                        Patch_Sizes_Per_Parcel, "SHAPE_Area",
                                        "", "", "UNKNOWN")

    # A table was created with Tabulate Intersection that contains the areas of all intact patches that intersect
    # each parcel. We have several duplicates of each Parcel OBJECTID in this table, one for every patch that intersects a parcel.
    # we need to determine which duplicate OBJECTID corresponds to the largest patch area.

    # First we get a full list of the object IDs in our clipped ParcelsFinal Class
    # even though there is only one value in each cell of the attribute table, the data type is a tuple, so we need to extract our value out of it, as with a list
    parcel_IDs_extracted = []
    parcel_IDs = arcpy.da.SearchCursor(ParcelsFinal, "OBJECTID")
    for ID in parcel_IDs:
        if isinstance(ID, tuple):
            ID = ID[0]
            parcel_IDs_extracted.append(ID)
        else:
            parcel_IDs_extracted.append(ID)

    Patch_Sizes_Per_Parcel = "Patch_Sizes_Per_Parcel"

    ##    # remove null values
    ##    with arcpy.da.UpdateCursor(Patch_Sizes_Per_Parcel, ["SHAPE_Area"]) as cursor:
    ##        for row in cursor:
    ##            if row[0] == None:
    ##                row[0] = 0
    ##                cursor.updateRow(row)

    # Now we get a full list of all of the Parcel Object ID that had at least one intersection with the "Intact" feature class (human footprint inverse)
    # NOTE: not all of the parcels in our area of interest necessarily intersect with the "Intact" feature class
    patch_IDs = arcpy.da.SearchCursor(Patch_Sizes_Per_Parcel, "OBJECTID_1")
    patch_IDs_extracted = []
    for ID in patch_IDs:
        if isinstance(ID, tuple):
            ID = ID[0]
            patch_IDs_extracted.append(ID)
        elif isinstance(ID, str):
            patch_IDs_extracted.append(ID)

    # initialize 2 new lists
    orderedListofLists = []
    newlist = []
    # for each OBJECTID we create a list of areas which are the intersections for a parcel, then append that list as an element in our list of lists (orderedListofLists)
    # the newlist is re-initialized every iteration after it has dumped its values into the orderedlistoflists. The orderedlistoflists is not re-initialized, and continues to be appended to.
    # Now the intersections for each parcel are nicely grouped together
    for ID in parcel_IDs_extracted:
        patch_IDs_and_Areas = arcpy.da.SearchCursor(
            Patch_Sizes_Per_Parcel, ["OBJECTID_1", "SHAPE_Area"])
        if ID not in patch_IDs_extracted:  # This step ensures that parcels with no intersection receive a zero instead of being skipped, which maintains the order of our field values.
            orderedListofLists.append(0)
        else:
            newlist = []
            for rows in patch_IDs_and_Areas:
                if ID == rows[0]:
                    x = rows[1]
                    newlist.append(x)
            orderedListofLists.append(newlist)

    # initialize one more list
    # Since the intersections (areas) are grouped by parcel, we extract the highest number in each list element (which is a list), and this gives us the largest patch size for each parcel.
    max_patch_size_per_parcel = []

    for patchSizes in orderedListofLists:
        if patchSizes == 0:
            max_patch_size_per_parcel.append(0)
        else:
            max_patch_size_per_parcel.append(max(patchSizes))

    # convert to acres for scoring
    max_patch_size_per_parcel_acres = []
    acre = 0
    for patchsize in max_patch_size_per_parcel:
        acre = patchsize / 4046.86
        max_patch_size_per_parcel_acres.append(acre)

    # Now we have a list that contains the largest patch that intersects each parcel.
    # It is ordered the same as the OBJECTID and we can now create a new field in the parcels feature class and
    # iteratively populate the rows with each patch area value

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "Largest_Patch_Area",
                              "DOUBLE",
                              field_length=50)

    # initialize x
    x = 0

    # use update cursor to populate rows and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal, "Largest_Patch_Area") as cursor:
        for row in cursor:
            row[0] = max_patch_size_per_parcel_acres[x]
            cursor.updateRow(row)
            x += 1

    # the following code calculates the nearest protected area feature and automatically creates a new field that contains that distance for each parcel.
    # Process: Near
    arcpy.Near_analysis(ParcelsFinal, parksProtectedAreasAlberta, "",
                        "NO_LOCATION", "NO_ANGLE", "PLANAR")

    # #######################################################################################################################################################################################################

    # The next section of code calculates the scores for each parcel based on the values in our newly added/created fields.

    # ##################### INTACTNESS SCORE #######################

    # extract percent intact field
    intact_scores = []
    percent_intact = arcpy.da.SearchCursor(ParcelsFinal, "Percent_Intact")
    # Perform the score calculation and append to a new list. Accommodate str and tuple field types
    for percent in percent_intact:
        if isinstance(percent, tuple):
            percent = percent[0] / 100
        elif isinstance(percent, str):
            percent = float(percent)
        intact_scores.append(percent)

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "SCORE_Intactness",
                              "DOUBLE",
                              field_length=50)

    x = 0

    # use update cursor to populate rows with list element and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal, "SCORE_Intactness") as cursor:
        for row in cursor:
            row[0] = intact_scores[x]
            cursor.updateRow(row)
            x += 1

    # ################### Lotic (Riparian) Score #########################

    # extract percent lotic field
    lotic_percent_list = []
    percent_lotic = arcpy.da.SearchCursor(ParcelsFinal, "Percent_Lotic")
    #  Accommodate str and tuple row types
    for percent in percent_lotic:
        if isinstance(percent, tuple):
            percent = percent[0]
        elif isinstance(percent, str):
            percent = float(percent)
        lotic_percent_list.append(percent)

    # now we create a lotic percent list with zeros removed before establishing the decile ranges
    lotic_percent_list_noZero = []
    for percent in lotic_percent_list:
        if percent != 0:
            lotic_percent_list_noZero.append(percent)

    # use numpy to calculate the decile ranges
    ranges = np.percentile(lotic_percent_list_noZero, np.arange(0, 100, 10))

    # iterate through the original lotic percent list and use the decile ranges to bin the values into the appropriate scores
    final_lotic_scores = []
    for percent in lotic_percent_list:
        if percent == 0:
            final_lotic_scores.append(0)
        elif percent >= ranges[0] and percent <= ranges[1]:
            final_lotic_scores.append(0.1)
        elif percent >= ranges[1] and percent <= ranges[2]:
            final_lotic_scores.append(0.2)
        elif percent >= ranges[2] and percent <= ranges[3]:
            final_lotic_scores.append(0.3)
        elif percent >= ranges[3] and percent <= ranges[4]:
            final_lotic_scores.append(0.4)
        elif percent >= ranges[4] and percent <= ranges[5]:
            final_lotic_scores.append(0.5)
        elif percent >= ranges[5] and percent <= ranges[6]:
            final_lotic_scores.append(0.6)
        elif percent >= ranges[6] and percent <= ranges[7]:
            final_lotic_scores.append(0.7)
        elif percent >= ranges[7] and percent <= ranges[8]:
            final_lotic_scores.append(0.8)
        elif percent >= ranges[8] and percent <= ranges[9]:
            final_lotic_scores.append(0.9)
        elif percent >= ranges[9]:
            final_lotic_scores.append(1)
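    # The ten-branch elif chain above can be written more compactly. A minimal
    # sketch of an equivalent scorer (hypothetical helper; it is not called in
    # this script and is shown only for illustration):
    def decile_score(value, breaks):
        # breaks holds the ten decile breakpoints returned by np.percentile
        if value == 0:
            return 0
        for i in range(9):
            if breaks[i] <= value <= breaks[i + 1]:
                return round(0.1 * (i + 1), 1)
        return 1  # above the 90th-percentile breakpoint
    # e.g. final_lotic_scores = [decile_score(p, ranges) for p in lotic_percent_list]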

    # the order of the resulting list is identical to the original, so it can be appended as a new field and the values will
    # correspond with the rows they are meant to score

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "SCORE_Lotic_Deciles",
                              "DOUBLE",
                              field_length=50)

    x = 0

    # use update cursor to populate rows with list element and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal, "SCORE_Lotic_Deciles") as cursor:
        for row in cursor:
            row[0] = final_lotic_scores[x]
            cursor.updateRow(row)
            x += 1

    # ######################### Wetland Score #####################

    # extract Wetland edge length field
    wetland_edge_list = []
    wetland_field = arcpy.da.SearchCursor(ParcelsFinal, "Wetland_Edge")

    # append values to a new list. Accommodate str and tuple row types.
    for length in wetland_field:
        if isinstance(length, tuple):
            length = length[0]
        elif isinstance(length, str):
            length = float(length)
        wetland_edge_list.append(length)

    # now we create a wetland edge list with zeros removed before establishing the decile ranges
    wetland_edge_list_noZero = []
    for edge_length in wetland_edge_list:
        if edge_length != 0:
            wetland_edge_list_noZero.append(edge_length)

    # use numpy to calculate the decile ranges
    ranges = np.percentile(wetland_edge_list_noZero, np.arange(0, 100, 10))

    # iterate through original wetland edge list and use the decile ranges to bin the wetland edge values to the appropriate scores
    final_wetland_scores = []
    for edge_length in wetland_edge_list:
        if edge_length == 0:
            final_wetland_scores.append(0)
        elif edge_length >= ranges[0] and edge_length <= ranges[1]:
            final_wetland_scores.append(0.1)
        elif edge_length >= ranges[1] and edge_length <= ranges[2]:
            final_wetland_scores.append(0.2)
        elif edge_length >= ranges[2] and edge_length <= ranges[3]:
            final_wetland_scores.append(0.3)
        elif edge_length >= ranges[3] and edge_length <= ranges[4]:
            final_wetland_scores.append(0.4)
        elif edge_length >= ranges[4] and edge_length <= ranges[5]:
            final_wetland_scores.append(0.5)
        elif edge_length >= ranges[5] and edge_length <= ranges[6]:
            final_wetland_scores.append(0.6)
        elif edge_length >= ranges[6] and edge_length <= ranges[7]:
            final_wetland_scores.append(0.7)
        elif edge_length >= ranges[7] and edge_length <= ranges[8]:
            final_wetland_scores.append(0.8)
        elif edge_length >= ranges[8] and edge_length <= ranges[9]:
            final_wetland_scores.append(0.9)
        elif edge_length >= ranges[9]:
            final_wetland_scores.append(1)

    # the order of the resulting list is identical to the original, so it can be appended as a new field and the values will
    # correspond with the rows they are meant to score

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "SCORE_Wetland_Deciles",
                              "DOUBLE",
                              field_length=50)

    x = 0

    # use update cursor to populate rows with list element and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal,
                               "SCORE_Wetland_Deciles") as cursor:
        for row in cursor:
            row[0] = final_wetland_scores[x]
            cursor.updateRow(row)
            x += 1

    # ################ Patch size score ####################

    # extract patch size field
    largest_patch_sizes = []
    patch_sizes = arcpy.da.SearchCursor(ParcelsFinal, "Largest_Patch_Area")
    # Append values to a new list. Accommodate str and tuple row types.
    for size in patch_sizes:
        if isinstance(size, tuple):
            size = size[0]
        elif isinstance(size, str):
            size = float(size)
        largest_patch_sizes.append(size)

    # now we populate a new list and assign scores based on acreage ranges
    # (boundaries use >= so a parcel of exactly 160, 2500, or 10000 acres still receives a score)
    patch_size_scores = []
    for size in largest_patch_sizes:
        if size < 160:
            patch_size_scores.append(0)
        elif 160 <= size < 2500:
            patch_size_scores.append(0.5)
        elif 2500 <= size < 10000:
            patch_size_scores.append(0.75)
        else:  # size >= 10000
            patch_size_scores.append(1)

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "SCORE_Patch_Size",
                              "DOUBLE",
                              field_length=50)

    x = 0

    # use update cursor to populate rows with list element and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal, "SCORE_Patch_Size") as cursor:
        for row in cursor:
            row[0] = patch_size_scores[x]
            cursor.updateRow(row)
            x += 1

    # ############### Proximity Score #####################

    # Rename the distance field to be more descriptive
    # and delete the NEAR_FID field (not needed)
    arcpy.AlterField_management(ParcelsFinal,
                                "NEAR_DIST",
                                new_field_name="Dist_to_Protected",
                                field_is_nullable="NULLABLE")
    arcpy.DeleteField_management(ParcelsFinal, "NEAR_FID")

    # extract proximity field
    all_proximities = []
    proximities = arcpy.da.SearchCursor(ParcelsFinal, "Dist_to_Protected")
    # Append values to a new list. Accommodate str and tuple row types.
    for proximity in proximities:
        if isinstance(proximity, tuple):
            proximity = proximity[0]
        elif isinstance(proximity, str):
            proximity = float(proximity)
        all_proximities.append(proximity)

    # now we populate a new list and assign scores based on distance ranges
    # (boundaries use >= so a parcel exactly 2000 or 4000 units away still receives a score)
    proximity_scores = []
    for proximity in all_proximities:
        if proximity == 0:
            proximity_scores.append(1)
        elif 0 < proximity < 2000:
            proximity_scores.append(0.75)
        elif 2000 <= proximity < 4000:
            proximity_scores.append(0.5)
        else:  # proximity >= 4000
            proximity_scores.append(0)

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "SCORE_Proximity",
                              "DOUBLE",
                              field_length=50)

    x = 0

    # use update cursor to populate rows with list element and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal, "SCORE_Proximity") as cursor:
        for row in cursor:
            row[0] = proximity_scores[x]
            cursor.updateRow(row)
            x += 1

    # ##################### FINAL PRIORITY SCORES ###########################

    sumOfScores = []
    scoreFields = arcpy.da.SearchCursor(ParcelsFinal, [
        "SCORE_Lotic_Deciles", "SCORE_Wetland_Deciles", "SCORE_Intactness",
        "SCORE_Patch_Size", "SCORE_Proximity"
    ])
    for score in scoreFields:
        sumScore = score[0] + score[1] + score[2] + score[3] + score[4]
        sumOfScores.append(sumScore)

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "PRIORITY_SCORE",
                              "DOUBLE",
                              field_length=50)

    x = 0

    # use update cursor to populate rows with list element and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal, "PRIORITY_SCORE") as cursor:
        for row in cursor:
            row[0] = sumOfScores[x]
            cursor.updateRow(row)
            x += 1
    # the order of the resulting list is identical to the original, so it can be appended as a new field and the values will
    # correspond with the rows they are meant to score

    # ################################## PRIORITY RANKING #######################################

    # now we calculate ranges for priority ranking with 4 breaks (Quartiles)
    ranges = np.percentile(sumOfScores, np.arange(0, 100, 25))

    final_priority_ranking = []
    for score in sumOfScores:
        if score >= ranges[0] and score <= ranges[1]:
            final_priority_ranking.append(None)
        elif score >= ranges[1] and score <= ranges[2]:
            final_priority_ranking.append(3)
        elif score >= ranges[2] and score <= ranges[3]:
            final_priority_ranking.append(2)
        elif score >= ranges[3]:
            final_priority_ranking.append(1)
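    # The quartile binning above can also be expressed with np.digitize. A
    # minimal sketch (alternative only, not used below; the [None, 3, 2, 1]
    # mapping mirrors the chain above, and a score sitting exactly on a
    # quartile breakpoint may land in the adjacent bin):
    quartile_idx = np.digitize(sumOfScores, ranges[1:])  # 0..3, lowest to highest quartile
    alt_priority_ranking = [[None, 3, 2, 1][i] for i in quartile_idx]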

    # create new field
    arcpy.AddField_management(ParcelsFinal,
                              "PRIORITY_RANKING",
                              "DOUBLE",
                              field_length=50)

    x = 0

    # use update cursor to populate rows with list element and after each time the cursor moves down to the next row,
    # iterate to the next list element (x)
    with arcpy.da.UpdateCursor(ParcelsFinal, "PRIORITY_RANKING") as cursor:
        for row in cursor:
            row[0] = final_priority_ranking[x]
            cursor.updateRow(row)
            x += 1

    arcpy.CheckInExtension("spatial")

    print("proccess complete")
    print("...........")
    print(
        "The resulting priority scored parcels feature class can be found in the user specified geodatabase by the name of 'ParcelsFinal'"
    )
    print(
        "To view the Conservation Priority ranking, symbolize the feature class by unique values, using the 'PRIORITY_RANKING' field."
    )
Ejemplo n.º 18
0
def main(*argv):
    """ main driver of program """
    try:
        attr_features = argv[0]
        sql_clause = argv[1]
        polygon_grid = argv[2]
        error_field_count = str(argv[3])  #'NULL_COUNT'#
        error_field_def = str(argv[4])  #'NULL_COLUMNS'#
        output_fc = argv[5]
        out_fc_exists = arcpy.Exists(output_fc)

        #  Local Variable
        #
        scratchFolder = env.scratchFolder
        scratchGDB = env.scratchGDB
        results = []
        #  Logic
        #
        if not out_fc_exists:
            output_gdb = validate_workspace(os.path.dirname(output_fc))
            #  Create the grid
            #
            out_grid = arcpy.CopyFeatures_management(polygon_grid,
                                                     output_fc)[0]
            out_grid = extend_table(out_grid)
            where_clause = None
        else:
            arcpy.MakeFeatureLayer_management(output_fc, "lyr")
            arcpy.SelectLayerByLocation_management("lyr",
                                                   "HAVE_THEIR_CENTER_IN",
                                                   polygon_grid)
            oids = [row[0] for row in arcpy.da.SearchCursor("lyr", "OID@")]
            if len(oids) > 1:
                oids_string = str(tuple(oids))
            else:
                oids_string = str('(' + str(oids[0]) + ')')

            where_clause = 'OBJECTID IN ' + oids_string
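            # The len(oids) > 1 branch exists because str(tuple([5])) yields
            # "(5,)", which is not valid SQL; note that an empty selection would
            # still raise an IndexError here. A sketch of an equivalent
            # one-liner (hypothetical alternative, not used here):
            # oids_string = "({})".format(", ".join(str(o) for o in oids))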

        #  Process the Data
        #
        error_field = (error_field_def, error_field_count)
        grid_sdf = SpatialDataFrame.from_featureclass(
            filename=output_fc, where_clause=where_clause)
        if sql_clause:
            attr_sdf = SpatialDataFrame.from_featureclass(
                attr_features, fields=error_field, where_clause=sql_clause)
        else:
            attr_sdf = SpatialDataFrame.from_featureclass(attr_features,
                                                          fields=error_field)
        index = attr_sdf.sindex
        for idx, row in enumerate(grid_sdf.iterrows()):
            errors = []
            attrs = []
            geom = row[1].SHAPE
            oid = row[1].OBJECTID
            print(str(oid))
            ext = [
                geom.extent.lowerLeft.X, geom.extent.lowerLeft.Y,
                geom.extent.upperRight.X, geom.extent.upperRight.Y
            ]
            row_oids = list(index.intersect(ext))
            df_current = attr_sdf.loc[row_oids]  #.copy()
            sq = df_current.geometry.disjoint(geom) == False
            fcount = len(df_current[sq])  # Total Count
            q2 = df_current[error_field_count] > 0
            #& q2
            df_current = df_current[sq].copy(
            )  # keep only the features that actually intersect this grid cell
            #print("here")
            if fcount > 0:  #len(df_current) > 0:
                errors += df_current[error_field_count].tolist()
                arcpy.AddMessage(str(errors))

                def process(x):
                    print(x)
                    return [
                        va
                        for va in x.replace(' ', '').split('|')[-1].split(',')
                        if len(va) > 1
                    ]

                for e in df_current[error_field_def].apply(process).tolist():
                    attrs += e
                    del e
            row = get_answers(oid=oid,
                              err=errors,
                              attr=attrs,
                              feature_count=fcount)
            results.append(row)
            if len(results) > 250:
                extend_table(table=output_fc, rows=results)
                results = []
            del idx
            del row
            del errors
            del attrs
            del geom
            del oid
            del ext
            del row_oids
            del df_current
            del sq
            del q2
        if len(results) > 0:
            extend_table(table=output_fc, rows=results)
        del index
        del results
        del grid_sdf
        del attr_sdf
    except arcpy.ExecuteError:
        line, filename, synerror = trace()
        arcpy.AddError("error on line: %s" % line)
        arcpy.AddError("error in file name: %s" % filename)
        arcpy.AddError("with error message: %s" % synerror)
        arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2))
    except FunctionError as f_e:
        messages = f_e.args[0]
        arcpy.AddError("error in function: %s" % messages["function"])
        arcpy.AddError("error on line: %s" % messages["line"])
        arcpy.AddError("error in file name: %s" % messages["filename"])
        arcpy.AddError("with error message: %s" % messages["synerror"])
        arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
    except:
        line, filename, synerror = trace()
        arcpy.AddError("error on line: %s" % line)
        arcpy.AddError("error in file name: %s" % filename)
        arcpy.AddError("with error message: %s" % synerror)
Ejemplo n.º 19
0
# Project the GPS points to the network's coordinate system (KKJ2, EPSG 2392)
for shape in files:
    filename = os.path.basename(shape)
    newname = "C" + filename.replace(
        ".shp", "")  # nimi ei voi alkaa numerolla, siksi C alkuun
    kkj_file = os.path.join(kkj_fp, newname)
    arcpy.Project_management(shape, kkj_file, spatial_ref,
                             "KKJ_To_WGS_1984_2_JHS153")

# Set the workspace
workspace = r"[fp]GPS.gdb"
arcpy.env.workspace = workspace
arcpy.env.overwriteOutput = True  # allow overwriting

kkj_List = arcpy.ListFeatureClasses()

pk_bufferi = r"[fp]"

for x in kkj_List:
    lyrname = x + "_lyr"
    # make layers from the GPS shapefiles so Select By Location can be used
    lyr = arcpy.MakeFeatureLayer_management(x, lyrname)
    # limit the points to the study area using the 5 km buffer around the capital-region municipalities
    arcpy.SelectLayerByLocation_management(
        lyr, "INTERSECT", pk_bufferi, "", "NEW_SELECTION",
        "INVERT")  # invert the selection to keep points outside the study-area buffer
    # if any points outside the area are selected, delete them
    if int(arcpy.GetCount_management(lyr).getOutput(0)) > 0:
        arcpy.DeleteFeatures_management(lyr)
        print(lyr)  # print the name of the layer whose points were removed
Ejemplo n.º 20
0
def get_field_names(shp):
    fieldnames = [f.name for f in arcpy.ListFields(shp)]
    return fieldnames


#print(get_field_names(base_flows_shp))

with arcpy.da.SearchCursor(
        base_flows_shp,
    ["FID", "vol_ab", "vol_ba", "tob_ab", "ton_ba"]) as cursor:
    for row in cursor:
        where_clause = """ "FID" = %d""" % row[0]
        arcpy.SelectLayerByAttribute_management(base_flows_shpf,
                                                "NEW_SELECTION", where_clause)
        arcpy.Buffer_analysis(base_flows_shpf, disk_shp, "50 feet")
        arcpy.SelectLayerByLocation_management(link_shpf, "COMPLETELY_WITHIN",
                                               disk_shp)
        IDs = [row1.getValue("ID") for row1 in arcpy.SearchCursor(link_shpf)]
        for ID in IDs:
            base_flows_dict[ID] = [row[1], row[2], row[3], row[4]]

base_flows_df = pandas.DataFrame(base_flows_dict).transpose().reset_index()
base_flows_df.columns = [
    "ID", "base_vol_daily_ab", "base_vol_daily_ba", "base_ton_daily_ab",
    "base_ton_daily_ba"
]

base_flows_df.ID = base_flows_df.ID.astype(int)
base_flows_df.base_vol_daily_ab = base_flows_df.base_vol_daily_ab.astype(int)
base_flows_df.base_vol_daily_ba = base_flows_df.base_vol_daily_ba.astype(int)
base_flows_df.base_ton_daily_ab = base_flows_df.base_ton_daily_ab.astype(float)
base_flows_df.base_ton_daily_ba = base_flows_df.base_ton_daily_ba.astype(float)
def crs_check_data_extent(args):
    # script parameters
    gdb = args[0]
    extentString = args[1]
    itemsToCheck = args[2]

    # workspace
    arcpy.env.workspace = gdb

    # script name
    script_name = os.path.basename(__file__)

    # variables
    err_message = None

    log_msg('calling {}'.format(script_name))
    try:

        extentValues = extentString.split(',')

        if len(extentValues) != 4:
            err_message = "missing pamaremter in extent config"
            return err_message

        xMin = int(extentValues[0])
        yMin = int(extentValues[1])
        xMax = int(extentValues[2])
        yMax = int(extentValues[3])

        extent = arcpy.Extent(xMin, yMin, xMax, yMax)
        extentArray = arcpy.Array(
            i for i in (extent.lowerLeft, extent.lowerRight, extent.upperRight,
                        extent.upperLeft, extent.lowerLeft))
        # create a extent polygon
        extentPolygon = arcpy.Polygon(extentArray, sr)

        # go through each feature class in the CRS gdb and delete features that fall outside the NZ bound
        fcs = arcpy.ListFeatureClasses()

        if len(itemsToCheck) > 0:
            fcs = list(set(fcs).intersection(set(itemsToCheck)))

        for fc in fcs:
            name = arcpy.Describe(fc).name
            log_msg('checking {0}...'.format(name))

            # Make a layer and select features that are within the extent polygon
            lyr = 'lyr_{}'.format(name)
            delete_layer(lyr)

            arcpy.MakeFeatureLayer_management(fc, lyr)
            count = int(arcpy.GetCount_management(lyr)[0])

            arcpy.SelectLayerByLocation_management(lyr, "INTERSECT",
                                                   extentPolygon, "",
                                                   "NEW_SELECTION",
                                                   "NOT_INVERT")
            arcpy.SelectLayerByLocation_management(lyr, "", "", "",
                                                   "SWITCH_SELECTION")

            count = int(arcpy.GetCount_management(lyr)[0])
            # delete features outside nz bound
            if count > 0:
                log_msg('deleting features in {0}: {1}'.format(name, count))
                arcpy.DeleteFeatures_management(lyr)

    except Exception as e:
        err_message = "ERROR while running {0}: {1}".format(script_name, e)

    return err_message, log_messages
Ejemplo n.º 22
0
                    arcpy.AddMessage("sWhere={}".format(sWhere))
                    pFLProcCat = "{}{}".format(sName, iProcess)
                    arcpy.MakeFeatureLayer_management(inCatchment, pFLProcCat,
                                                      sWhere)
                    pFCProcCat = os.path.join(pProcWKS,
                                              "Cat{}".format(iProcess))
                    arcpy.CopyFeatures_management(pFLProcCat, pFCProcCat)
                    dtol = float(arcpy.env.cellSize) * 2.0
                    arcpy.AddMessage("Arcpy.Exists({})={}".format(
                        inStream, arcpy.Exists(inStream)))
                    pFLProcRiver = "Riv{}".format(iProcess)
                    pFCProcRiver = os.path.join(
                        os.path.join(pProcWKS, pFLProcRiver))
                    arcpy.MakeFeatureLayer_management(inStream, pFLProcRiver)
                    arcpy.SelectLayerByLocation_management(
                        pFLProcRiver, "INTERSECT", pFCProcCat, (-1.0 * dtol),
                        "NEW_SELECTION", "NOT_INVERT")
                    arcpy.CopyFeatures_management(pFLProcRiver, pFCProcRiver)
                    pFLProcPoints = "Pnt{}".format(iProcess)
                    pFCProcpoints = os.path.join(pProcWKS, pFLProcPoints)
                    arcpy.MakeFeatureLayer_management(inPoints, pFLProcPoints)
                    arcpy.SelectLayerByLocation_management(
                        pFLProcPoints, "INTERSECT", pFCProcCat, None,
                        "NEW_SELECTION", "NOT_INVERT")
                    arcpy.CopyFeatures_management(pFLProcPoints, pFCProcpoints)
                    sWKSName = "SWK{}.gdb".format(iProcess)
                    sOutFCName = "{}_{}".format(sName, iProcess)

                    pFolderPS = pProcFolder  # os.path.join(pProcFolder, "SWK{}".format(iProcess))

                    if (os.path.exists(pFolderPS) == False):
Ejemplo n.º 23
0
# Define relevant feature classes
fcRosters = arcpy.GetParameterAsText(0)
fcCountries = arcpy.GetParameterAsText(1)

# Define a country
countryName = arcpy.GetParameterAsText(2)
countryQuery = "CNTRY_NAME = '" + countryName + "'"

try:
    # make an attribute selection to select a country
    arcpy.MakeFeatureLayer_management(fcCountries, "SelectedCountry",
                                      countryQuery)
    arcpy.MakeFeatureLayer_management(fcRosters, "SelectedPlayers")

    # Use a spatial selection to grab all the players that fall within this country
    arcpy.SelectLayerByLocation_management("SelectedPlayers", "CONTAINED_BY",
                                           "SelectedCountry")

    # Makes a separate shapefile for each of the three forward positions
    # (center, right wing, and left wing) from the player roster
    arcpy.MakeFeatureLayer_management("SelectedPlayers", "Center",
                                      "position = 'C'")
    arcpy.CopyFeatures_management("Center", "/Centers.shp")

    arcpy.MakeFeatureLayer_management("SelectedPlayers", "RightWing",
                                      "position = 'RW'")
    arcpy.CopyFeatures_management("RightWing", "/RightWing.shp")

    arcpy.MakeFeatureLayer_management("SelectedPlayers", "LeftWing",
                                      "position = 'LW'")
    arcpy.CopyFeatures_management("LeftWing", "/LeftWing.shp")
Ejemplo n.º 24
0
def multiProcessing_function(data):
    month = getMonthFromFileName(data)
    print "Start processing " + month + " at: " + str(datetime.now()) + "\n"
    worktempfolder = out_folder + "\\" + shipType + "\\temp_" + month
    if not arcpy.Exists(worktempfolder):
        arcpy.CreateFolder_management(out_folder + "\\" + shipType,
                                      "temp_" + month)

    for i in range(1, 6):
        print "--- Spatial join " + month + " gridDivision " + str(i) + "..."

        gridDivision = grids_folder + "\\grid_division" + str(i) + ".shp"

        arcpy.MakeFeatureLayer_management(gridDivision,
                                          "gridDivision_lyr_" + month)
        arcpy.MakeFeatureLayer_management(grid,
                                          "grid_lyr_" + month + "_" + str(i))
        arcpy.MakeFeatureLayer_management(data, "line_lyr")

        arcpy.SelectLayerByLocation_management(
            "grid_lyr_" + month + "_" + str(i), "WITHIN",
            "gridDivision_lyr_" + month)
        arcpy.MakeFeatureLayer_management("grid_lyr_" + month + "_" + str(i),
                                          "grid_lyr_" + month)

        #result = arcpy.GetCount_management("line_lyr")
        #count = int(result.getOutput(0))
        #print " lines total count: " + str(count) + " " + month

        # Select lines in grid division
        arcpy.SelectLayerByLocation_management("line_lyr", "INTERSECT",
                                               "gridDivision_lyr_" + month, "",
                                               "NEW_SELECTION")

        #if count > 0:
        # Select grids intersecting lines
        arcpy.SelectLayerByLocation_management("grid_lyr_" + month,
                                               "INTERSECT", "line_lyr", "",
                                               "NEW_SELECTION")

        # Spatial join selected lines and grids
        arcpy.SpatialJoin_analysis(
            "grid_lyr_" + month, "line_lyr",
            worktempfolder + "\\" + month + "_SpJoin_" + str(i) + ".shp",
            "JOIN_ONE_TO_MANY", "", "", "INTERSECT")

        arcpy.Delete_management("gridDivision_lyr_" + month)
        arcpy.Delete_management("grid_lyr_" + month + "_" + str(i))
        arcpy.Delete_management("grid_lyr_" + month)
        arcpy.Delete_management("line_lyr")

        print "--- End spatial join: " + month + " gridDivision " + str(
            i) + "..."

    spjoinList = []
    for spjoin in os.listdir(worktempfolder):
        if spjoin.endswith(".shp"):
            spjoinList.append(worktempfolder + "\\" + spjoin)

    if len(spjoinList) > 0:
        # Merge Spatial Joins
        print "--- Merge " + month + "..."
        arcpy.Merge_management(spjoinList,
                               worktempfolder + "\\" + month + "_Merged.shp")
        print "--- End merge " + month + "..."

        # Dissolve merged
        print "--- Dissolve " + month + "..."
        arcpy.Dissolve_management(
            worktempfolder + "\\" + month + "_Merged.shp",
            worktempfolder + "\\" + month + "_Dissolve.shp", "TARGET_FID",
            [["Join_Count", "SUM"]])
        print "--- End dissolve " + month + "..."

        # Make raster out of dissolved
        print "--- FeatureToRaster " + month + "..."
        arcpy.FeatureToRaster_conversion(
            worktempfolder + "\\" + month + "_Dissolve.shp", "SUM_Join_C",
            out_folder + "\\" + shipType + "\\" + month + "_" + year + "_" +
            shipType + "_Raster" + ".tif", 1000)
        print "--- End FeatureToRaster " + month + "..."

        arcpy.Delete_management(worktempfolder)
    print "End processing " + month + " at: " + str(datetime.now()) + "\n"
Ejemplo n.º 25
0
# Extract features to a new feature class based on a Location and an attribute query
#-------------------------------------------------------------------------------
# Import arcpy and set path to data
import arcpy
arcpy.env.workspace = r"C:\Users\laboratorios\ELVLC\DATA\castilla-leon"
arcpy.env.overwriteOutput = True
arcpy.MakeFeatureLayer_management('MUNICIPIO.shp', 'municipio_lyr')
arcpy.SelectLayerByAttribute_management('municipio_lyr',
                                        'NEW_SELECTION', '"POB95" > 5000')
arcpy.CopyFeatures_management("municipio_lyr", 'scriptFIN_municipio.shp')
# SelectLayerByLocation_management (in_layer, {overlap_type}, {select_features}, {search_distance}, {selection_type})
arcpy.MakeFeatureLayer_management('ESTACIONES.shp', 'estaciones_lyr')
arcpy.MakeFeatureLayer_management('EMBALSES.shp', 'embalses_lyr')
#distance = 40000 # your distance here (could be a string as well)
#linearUnit = str(distance) + " Meters" # use any of the valid linear-unit keywords
arcpy.SelectLayerByLocation_management("estaciones_lyr", 'WITHIN_A_DISTANCE', "embalses_lyr","40000")
arcpy.CopyFeatures_management("estaciones_lyr", 'scriptFIN_estaciones.shp')
arcpy.MakeFeatureLayer_management('scriptFIN_estaciones.shp', 'script_estaciones_lyr')
arcpy.MakeFeatureLayer_management('scriptFIN_municipio.shp', 'script_municipio_lyr')
arcpy.SelectLayerByLocation_management('script_municipio_lyr', 'intersect', 'script_estaciones_lyr')
arcpy.CopyFeatures_management("script_municipio_lyr", 'scriptFIN_FINAL.shp')


#-------------------------------------------------------------------------------
# SELECTIONS
#-------------------------------------------------------------------------------
# Import system modules
import arcpy

# Set the workspace
arcpy.env.workspace = "C:\Users\laboratorios\ELVLC\DATA\castilla-leon"
# Select Layers by Location, then output layer to scratch
# Process: Raster Domain
arcpy.RasterDomain_3d(
    raster_location + raster_file, mask_scratch +
    "\\GEOG490_maskscratch.gdb\\" + mask_file[:-4] + "_rasterExtent",
    "POLYGON")
# Make temp layer with polygon mask and raster extent
arcpy.MakeFeatureLayer_management(
    mask_scratch + "\\GEOG490_maskscratch.gdb\\" + mask_file[:-4] +
    "_masktemp", 'temp_mask_within')
arcpy.MakeFeatureLayer_management(
    mask_scratch + "\\GEOG490_maskscratch.gdb\\" + mask_file[:-4] +
    "_rasterExtent", 'temp_rasterExtent')
# Get only polygons that are within the raster
arcpy.SelectLayerByLocation_management("temp_mask_within", "COMPLETELY_WITHIN",
                                       "temp_rasterExtent", "",
                                       "NEW_SELECTION", "NOT_INVERT")
# Output new file, only if there are no problems
matchcount = int(arcpy.GetCount_management('temp_mask_within')[0])
if matchcount == 0:
    print('Error: No points exist within the clipping raster!')
    sys.exit('Error: No points exist within the clipping raster!')
else:
    print(str(matchcount) + ' points found within the raster to clip.')
    arcpy.CopyFeatures_management(
        'temp_mask_within', mask_scratch + "\\GEOG490_maskscratch.gdb\\" +
        mask_file[:-4] + "_mask")

# Import and execute special module
# Create a separate polygon mask file for each feature in the polygon layer
print("Running external script...")
Ejemplo n.º 27
0
    "Shape Shape VISIBLE NONE;FID FID VISIBLE NONE;ID ID VISIBLE NONE;GRIDCODE GRIDCODE VISIBLE NONE;ORIG_FID ORIG_FID VISIBLE NONE"
)

# Process: Make Feature Layer (2)
arcpy.MakeFeatureLayer_management(
    Bobcat_Hab_merge, Output_Layer__2_, "", "",
    "FID FID VISIBLE NONE;Shape Shape VISIBLE NONE;ID ID VISIBLE NONE;GRIDCODE GRIDCODE VISIBLE NONE;area area VISIBLE NONE;sizeclass sizeclass VISIBLE NONE;sqkm sqkm VISIBLE NONE"
)

# Process: Select Layer By Attribute
arcpy.SelectLayerByAttribute_management(Output_Layer__2_, "NEW_SELECTION",
                                        "\"GRIDCODE\" = 1")

# Process: Select Layer By Location
arcpy.SelectLayerByLocation_management(Output_Layer, "BOUNDARY_TOUCHES",
                                       Bobcat_Hab_merge__2_, "",
                                       "NEW_SELECTION")

# Process: Select Layer By Attribute (2)
arcpy.SelectLayerByAttribute_management(Output_Layer__2_, "NEW_SELECTION",
                                        "\"GRIDCODE\" = 2")

# Process: Select Layer By Location (2)
arcpy.SelectLayerByLocation_management(RasterT_badg_li1_Erase_Layer,
                                       "BOUNDARY_TOUCHES",
                                       Bobcat_Hab_merge__3_, "",
                                       "SUBSET_SELECTION")

# Process: Select Layer By Attribute (3)
arcpy.SelectLayerByAttribute_management(RasterT_badg_li1_Erase_Layer__2_,
                                        "SUBSET_SELECTION", "GRIDCODE = 1")
Ejemplo n.º 28
0
def main(*argv):
    """ main driver of program """
    try:
        fcs = argv[0]
        source_field = str(argv[1]).upper()
        value_field = str(argv[2]).upper()
        polygon_grid = argv[3]
        output_fc = argv[4]
        out_fc_exists = arcpy.Exists(output_fc)

        output_gdb, out_name = os.path.split(output_fc)
        #   Local Variables
        #
        scratchGDB = env.scratchGDB
        scratchFolder = env.scratchFolder
        results = []
        source_fields = ['zi001_sdp'.upper(),
                         'zi001_sps'.upper()]
        #  Logic
        #
        if not source_field:
            source_field = source_fields[1]
        if not value_field:
            value_field = source_fields[0]
        #if not output_gdb:
        #    output_gdb = env.scratchGDB
        master_times = datetime.datetime.now()
        fc = fcs
        if not out_fc_exists:
            output_gdb = validate_workspace(wrksp=output_gdb)

            #out_name = "srcLin_%s_%s" % (os.path.basename(fc[:-3]), fc[-3:])
            out_grid = os.path.join(output_gdb, out_name)
            out_grid = arcpy.CopyFeatures_management(polygon_grid, out_grid)[0]

            arcpy.AddMessage("Working on feature class: %s" % os.path.basename(fc))
            array = process_source_lineage(grid_sdf=out_grid,
                                           data_sdf=fc,
                                           search_field=source_field,
                                           value_field=value_field)
            extend_table(out_grid, array)
            results.append(out_grid)
        else:
            arcpy.MakeFeatureLayer_management(output_fc, "lyr")
            arcpy.SelectLayerByLocation_management("lyr", "HAVE_THEIR_CENTER_IN", polygon_grid)
            oids = [row[0] for row in arcpy.da.SearchCursor("lyr", "OID@")]
            if len(oids) > 1:
                oids_string = str(tuple(oids))
            else:
                oids_string = '(' + str(oids[0]) + ')'

            arcpy.AddMessage("Analyzing " + oids_string)
            arcpy.AddMessage("Working on feature class: %s" % os.path.basename(fc))
            array = process_source_lineage(grid_sdf=output_fc,
                                           data_sdf=fc,
                                           search_field=source_field,
                                           value_field=value_field,
                                           where_clause='OBJECTID IN ' + oids_string)
            extend_table(output_fc, array)
            results.append(output_fc)

        arcpy.AddMessage("Analysis on feature class: %s finished." % os.path.basename(fc))
        arcpy.AddMessage("Total Time %s" % (datetime.datetime.now() - master_times))
        #arcpy.SetParameterAsText(5, results)
    except arcpy.ExecuteError:
        line, filename, synerror = trace()
        arcpy.AddError("error on line: %s" % line)
        arcpy.AddError("error in file name: %s" % filename)
        arcpy.AddError("with error message: %s" % synerror)
        arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2))
        log.error("error on line: %s" % line)
        log.error("error in file name: %s" % filename)
        log.error("with error message: %s" % synerror)
        log.error("ArcPy Error Message: %s" % arcpy.GetMessages(2))
    except FunctionError as f_e:
        messages = f_e.args[0]
        arcpy.AddError("error in function: %s" % messages["function"])
        arcpy.AddError("error on line: %s" % messages["line"])
        arcpy.AddError("error in file name: %s" % messages["filename"])
        arcpy.AddError("with error message: %s" % messages["synerror"])
        arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
        log.error("error in function: %s" % messages["function"])
        log.error("error on line: %s" % messages["line"])
        log.error("error in file name: %s" % messages["filename"])
        log.error("with error message: %s" % messages["synerror"])
        log.error("ArcPy Error Message: %s" % messages["arc"])
    except:
        line, filename, synerror = trace()
        arcpy.AddError("error on line: %s" % line)
        arcpy.AddError("error in file name: %s" % filename)
        arcpy.AddError("with error message: %s" % synerror)
        log.error("error on line: %s" % line)
        log.error("error in file name: %s" % filename)
        log.error("with error message: %s" % synerror)
    finally:
        logging.shutdown()
                    if landcover_majority == 2:
                        if pred_grid_min == 1 or landcover_min == 1:
                            row[1] = "REVIEW"
                        else:
                            row[1] = "NOT ASSESS"
                        u_cur.updateRow(row)
                        continue
                # end of remote sensing work

        log("remote sensing values transferred to parcel input data")

        log("running various flag selections...")
        # NDMPL Tribal Selection by Centroid
        flyr_tribal = "tribal_selection"
        arcpy.MakeFeatureLayer_management(input_parcels, flyr_tribal)
        arcpy.SelectLayerByLocation_management(in_layer=flyr_tribal, overlap_type="HAVE_THEIR_CENTER_IN", select_features=TRIBAL_FC, search_distance="", selection_type="NEW_SELECTION")

        # NDMPL Federal Selection by Centroid
        flyr_fed = "fed_selection"
        arcpy.MakeFeatureLayer_management(input_parcels, flyr_fed)
        arcpy.SelectLayerByLocation_management(in_layer=flyr_fed, overlap_type="HAVE_THEIR_CENTER_IN", select_features=FED_FC, search_distance="", selection_type="NEW_SELECTION")
        
        # DNR Lands Selection by Centroid
        flyr_dnr = "dnr_selection"
        arcpy.MakeFeatureLayer_management(input_parcels, flyr_dnr)
        arcpy.SelectLayerByLocation_management(in_layer=flyr_dnr, overlap_type="HAVE_THEIR_CENTER_IN", select_features=DNR_LANDS_FC, search_distance="", selection_type="NEW_SELECTION")
        
        # create search cursors from feature layers and spatial parcels
        tribal_flgs = [i[0] for i in arcpy.da.SearchCursor(flyr_tribal, ["PSL_NRML_PRCL_ID"])]
        fed_flgs = [i[0] for i in arcpy.da.SearchCursor(flyr_fed, ["PSL_NRML_PRCL_ID"])]
        dnr_flgs = [i[0] for i in arcpy.da.SearchCursor(flyr_dnr, ["PSL_NRML_PRCL_ID"])]
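        # The three centroid selections above follow the same pattern and could
        # be generated from a mapping. A sketch (hypothetical refactor, not used
        # by this script):
        # flag_ids = {}
        # for label, sel_fc in (("tribal", TRIBAL_FC), ("fed", FED_FC), ("dnr", DNR_LANDS_FC)):
        #     lyr = "{}_selection".format(label)
        #     arcpy.MakeFeatureLayer_management(input_parcels, lyr)
        #     arcpy.SelectLayerByLocation_management(lyr, "HAVE_THEIR_CENTER_IN", sel_fc)
        #     flag_ids[label] = [i[0] for i in arcpy.da.SearchCursor(lyr, ["PSL_NRML_PRCL_ID"])]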
Ejemplo n.º 30
0
import arcpy
from arcpy import env
env.workspace = r"H:\6525\mfcrrsco\census"
env.overwriteOutput = True

fc = "census_US.shp"
fieldName = "STATE_NAME"
value = 'Tennessee'
query = '"' + fieldName + '" = ' + "'" + value + "'"

#make feature layer

arcpy.MakeFeatureLayer_management(
    fc,
    "allStates")  #entire US as a feature layer; saved in memory not harddrive
arcpy.MakeFeatureLayer_management(
    fc, "TN", query)  #selects only Tennessee as a select by attributes

#select by location
arcpy.SelectLayerByLocation_management(
    "allStates", "BOUNDARY_TOUCHES",
    "TN")  #allStates is now only those selected

# check the output; get comfortable with the search cursor
with arcpy.da.SearchCursor(
        "allStates", ("STATE_NAME", )
) as cursor:  # a da.SearchCursor is fast because it reads the existing attribute table rather than building a new layer
    for row in cursor:
        print row[0]
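# A quicker sanity check (sketch, not part of the original example): GetCount
# honors the layer's current selection, so it returns how many states were kept.
count = int(arcpy.GetCount_management("allStates").getOutput(0))
print count  # number of states whose boundary touches Tennessee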