# Name: arcpy_SplitByAttributes.py
# Description: Split a feature class into one output per unique combination
#              of values in the chosen attribute fields.

import arcpy

# Source data, destination workspace, and the attribute fields whose unique
# value combinations define the split.
source_fc = 'c:/data/base.gdb/ecology'
out_workspace = 'c:/data/output.gdb'
split_fields = ['REGION', 'ECO_CODE']

arcpy.SplitByAttributes_analysis(source_fc, out_workspace, split_fields)
# --- Example no. 2 (separator left over from the scraped source; score: 0) ---
    Est_Join, Per_a_proc,
    'Estacao_Vazia_Features.DthInicioPeriodo IS NULL AND StageDW.SFG.StgEstacaoCurvaPermanencia_Features.MdaLatitude IS NOT NULL'
)

# Make an XY event layer for the period to be processed.
# NOTE(review): Per_a_proc, Per_fc and WsPeriodo are defined in code that is
# truncated above this snippet — verify against the full script.

# Build a point layer from the longitude/latitude columns of the staged
# station rows, using the SIRGAS 2000 geographic coordinate system given as
# WKT below (the trailing numbers are ArcGIS XY domain/resolution settings).
arcpy.MakeXYEventLayer_management(
    Per_a_proc, "StageDW_SFG_StgEstacaoCurvaPermanencia_Features_MdaLongitude",
    "StageDW_SFG_StgEstacaoCurvaPermanencia_Features_MdaLatitude", Per_fc,
    "GEOGCS['GCS_SIRGAS_2000',DATUM['D_SIRGAS_2000',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]];-400 -400 1000000000;-100000 10000;-100000 10000;8,98315284119521E-09;0,001;0,001;IsHighPrecision",
    "")

# Split by attributes: one output feature class per unique combination of
# the period start/end date fields, written into the WsPeriodo workspace.

arcpy.SplitByAttributes_analysis(
    Per_fc, WsPeriodo,
    "StageDW_SFG_StgEstacaoCurvaPermanencia_Features_DthInicioPeriodo;StageDW_SFG_StgEstacaoCurvaPermanencia_Features_DthFimPeriodo"
)

# List the point feature classes to be processed.
FcPontos = arcpy.ListFeatureClasses('', 'Point')

# IDW iteration using the extent of Brazil.
for fc in FcPontos:
    # Name the output raster after the source feature class.
    OutIDW = arcpy.Describe(fc).baseName + "_IDW"
    # Commented-out IDW call kept from the original for reference:
    #arcpy.IDW_ga(in_features=fc, z_field="Estacao_BI_Features_MdaiDRE_LT", out_ga_layer="", out_raster=OutIDW,
    #            cell_size="0,156020628", power="2",
    #           search_neighborhood="NBRTYPE=Smooth S_MAJOR=11,7367757908039 S_MINOR=11,7367757908039 ANGLE=0 SMOOTH_FACTOR=0,4",
    #          weight_field="")
    # Save current environment settings so they can be restored later.
    # NOTE(review): the restore code is cut off below this snippet.
    tempEnvironment0 = arcpy.env.newPrecision
    arcpy.env.newPrecision = "SINGLE"
    tempEnvironment1 = arcpy.env.autoCommit
# --- Example no. 3 (separator left over from the scraped source; score: 0) ---
        # Add XY Coordinates to feature class in the NAD_1983_Alaska_Albers projection
        arcpy.AddXY_management(outShapefile)
        # Delete intermediate files
        arcpy.Delete_management(tempRaster)
    elif noData == 1:
        arcpy.AddMessage("All values for this watershed are nodata...")
        # Delete intermediate files
        arcpy.Delete_management(tempRaster)

# Set the snap raster and cell size environments so all derived rasters align
# with the area-of-interest grid.
# NOTE(review): area_of_interest, watersheds, watershed_geodatabase,
# workspace_folder, output_folder and buildPointGrids are defined in code
# truncated above this snippet.
arcpy.env.snapRaster = area_of_interest
arcpy.env.cellSize = area_of_interest

# Split watersheds by attribute and store as independent feature classes in an empty geodatabase
arcpy.AddMessage("Splitting unique watersheds into independent feature classes...")
arcpy.SplitByAttributes_analysis(watersheds, watershed_geodatabase, ['HUC10'])

# List all watershed feature classes in the watershed geodatabase
arcpy.env.workspace = watershed_geodatabase
featureClasses = arcpy.ListFeatureClasses()
featureClasses_length = len(featureClasses)

# Loop through feature classes and convert them to point grids with the same cell size and grid as the area of interest
cell_size = arcpy.GetRasterProperties_management(area_of_interest, "CELLSIZEX")
grid_raster = os.path.join(workspace_folder, "grid_raster.tif")
count = 1  # 1-based progress counter for the AddMessage output
for feature in featureClasses:
    arcpy.AddMessage("Converting watershed to point grid for watershed " + str(count) + " of " + str(featureClasses_length) + "...")
    input_feature = os.path.join(watershed_geodatabase, feature)
    output_shapefile = os.path.join(output_folder, feature + ".shp")
    buildPointGrids(input_feature, grid_raster, cell_size, output_shapefile)
# Create a temp feature for Mexico (Python 2 script: note the print statements).
# NOTE(review): Nation, mergeFile, output, ABV, finalFolder and the helper
# functions select_by_attribute/select_by_location come from truncated code
# above this snippet.
mexico = select_by_attribute(Nation, "\"ADM0_ID\" = '484'")
# Keep only features within 2 km of the Mexico polygon.
select_by_location(mergeFile, mexico, "2 Kilometers", output)

print "Step 10 completed at", datetime.datetime.now().strftime("%A, %B %d %Y %I:%M:%S%p")


## ---------------------------------------------------------------------------        
## 11. Split By Attributes and Rename dataset
## Description: Split the dataset by state.

print "\nStep 11 Split by attribute starts at", datetime.datetime.now().strftime("%A, %B %d %Y %I:%M:%S%p")

fields = [ABV]
arcpy.SplitByAttributes_analysis(output, finalFolder, fields)

arcpy.env.workspace = "C:\\GIS_RGB\\Geodatabase\\Hydrau_WaterUse\\3_waterRights\\final_output\\"

# State-abbreviation shapefiles produced by the split above.
list_fc = ["COA.shp", "CHI.shp", "DUR.shp", "NVL.shp", "TAM.shp"]

# Remove the shapefile produced for empty/placeholder attribute values.
arcpy.Delete_management("T.shp")

# Append a descriptive suffix to each state shapefile.
for fc in list_fc:
    name = os.path.splitext(fc)[0]
    arcpy.Rename_management(fc, name + "_WaterRights")


print "Step 11 completed at", datetime.datetime.now().strftime("%A, %B %d %Y %I:%M:%S%p")

# Build a species-richness raster: rasterize each feature of the source
# layer separately (value 1 inside the feature), then sum the rasters.
arcpy.env.cellSize = 0.5

in_features = r'D:\test\test.gdb\MAMMALS_COL_SMALL'
out_richness = r'D:\test\mammal_richness.tif'

# Constant column of ones: becomes the cell value when rasterizing.
arcpy.AddField_management(in_features, 'ONES', 'SHORT')
arcpy.CalculateField_management(in_features, 'ONES', '1')

# Unique text key per feature so SplitByAttributes emits one file each.
arcpy.AddField_management(in_features, 'FEAT', 'TEXT')
arcpy.CalculateField_management(in_features, 'FEAT', "'feat_{}'.format(!FID!)",
                                'PYTHON')

# One shapefile per feature, written into the current workspace.
arcpy.SplitByAttributes_analysis(in_features, arcpy.env.workspace, 'FEAT')

# Rasterize every split polygon using the ONES column.
for split_fc in arcpy.ListFeatureClasses():
    base = os.path.splitext(split_fc)[0]
    arcpy.PolygonToRaster_conversion(split_fc, 'ONES', base + '.tif')

# Cell-wise sum of all per-feature rasters = richness per cell.
arcpy.CheckOutExtension('Spatial')
richness = arcpy.sa.CellStatistics(arcpy.ListRasters(), 'SUM')
richness.save(out_richness)

# remove folder with polygons and rasters
# shutil.rmtree(arcpy.env.workspace)
# --- Example no. 6 (separator left over from the scraped source; score: 0) ---
def hru2(path0, path, outMerge):
    """Enrich the merged HRU feature class and build the final ``HRU6``.

    Adds area (km2), latitude/longitude, a mean elevation from the
    ``altitude.tif`` raster, and a sequential HRU ID.

    Parameters
    ----------
    path0 : str
        Folder containing the input ``altitude.tif`` elevation raster.
    path : str
        Workspace folder; receives the split/join intermediates and the
        final ``HRU6`` feature class.
    outMerge : str
        Path to the merged HRU feature class, updated in place.
    """
    print(
        'add latitude,longitude, HRU ID, and Elevation information to the HRU feature class'
    )
    import arcpy, os
    arcpy.env.workspace = path

    # Drop the leftover dissolve field, then recompute polygon area in km2.
    arcpy.DeleteField_management(outMerge, "ident")
    arcpy.DeleteField_management(outMerge, ["POLY_AREA"])
    arcpy.AddGeometryAttributes_management(outMerge, "AREA", "METERS",
                                           "SQUARE_KILOMETERS")  # area in km2

    # Centroid coordinates must be geographic; the HRU layer is projected,
    # so project each shape to NAD83 (EPSG 4269) before reading the centroid.
    arcpy.AddField_management(outMerge, "latitude", "DOUBLE", "", "", 16)
    arcpy.AddField_management(outMerge, "longitude", "DOUBLE", "", "", 16)
    sr = arcpy.SpatialReference(4269)  # EPSG code of NAD83 = 4269
    with arcpy.da.UpdateCursor(outMerge,
                               ['SHAPE@', 'latitude', 'longitude']) as rows:
        for row in rows:
            centroid = row[0].projectAs(sr).centroid
            row[1:] = [centroid.Y, centroid.X]  # decimal degrees
            rows.updateRow(row)

    # Replace missing classification values with the default class "1".
    with arcpy.da.UpdateCursor(outMerge,
                               ['soil_type', 'LU_type', 'slope']) as rows:
        for row in rows:
            row = ["1" if value is None else value for value in row]
            rows.updateRow(row)

    # Convert the elevation raster to points, split the HRUs by land-use
    # class, then spatial-join the points back to attach a mean elevation.
    altitude = os.path.join(path0, "altitude" + "." + "tif")
    elev_point = os.path.join(path, "elev_point")
    arcpy.RasterToPoint_conversion(altitude, elev_point, "Value")

    arcpy.SplitByAttributes_analysis(outMerge, path, ['LU_type'])

    # SplitByAttributes names outputs after the LU_type values: T0_0..T9_0.
    fci = ["T{}_0".format(i) for i in range(10)]      # split shapefiles
    fco = ["T{}_0_sj".format(i) for i in range(10)]   # spatial-join outputs
    for infc, outfc in zip(fci, fco):
        fnp = os.path.join(path, infc)  # the input shapefile
        fieldmappings = arcpy.FieldMappings()
        # Combine the HRU attribute table with the elevation point table.
        fieldmappings.addTable(fnp)
        fieldmappings.addTable(elev_point)

        # Rename the joined "grid_code" field to "Mean_Elev" and set its
        # merge rule to "mean" so each HRU gets its average elevation.
        elevpntFieldIndex = fieldmappings.findFieldMapIndex("grid_code")
        fieldmap = fieldmappings.getFieldMap(elevpntFieldIndex)
        field = fieldmap.outputField
        field.name = "Mean_Elev"
        field.aliasName = "Mean_Elev"
        fieldmap.outputField = field
        fieldmap.mergeRule = "mean"
        fieldmappings.replaceFieldMap(elevpntFieldIndex, fieldmap)

        # Defaults for join operation and join type.
        arcpy.SpatialJoin_analysis(infc, elev_point, outfc, "#", "#",
                                   fieldmappings)

    # Merge the per-class joins back into a single feature class.
    HRU6 = os.path.join(path, "HRU6")
    arcpy.Merge_management(fco, HRU6)
    arcpy.DeleteField_management(HRU6, ["ident", "Join_Count", "TARGET_FID"])

    # Delete the intermediate split and join feature classes.
    for intermediate in fci + fco:
        arcpy.Delete_management(intermediate)

    # Number the HRUs sequentially starting at 1.
    arcpy.AddField_management(HRU6, "HRU_ID", "LONG", "", "", 16)
    with arcpy.da.UpdateCursor(HRU6, "HRU_ID") as cursor:
        for hru_id, row in enumerate(cursor, start=1):
            row[0] = hru_id
            cursor.updateRow(row)

    print('done!')
# --- Example no. 7 (separator left over from the scraped source; score: 0) ---
    # Start procedure to split input shapefiles.
    # NOTE(review): this is the tail of a function whose header (and the
    # definitions of root, pgshp, pgsf) is truncated above this snippet.
    fgdb = root + os.sep + "fGDB.gdb"
    # Feature-class name = input shapefile name without the .shp extension.
    fcname = os.path.split(pgshp)[1]
    fcname = fcname.replace('.shp', '')
    infc = fgdb + os.sep + fcname

    # Create the scratch File GDB if it does not exist yet.
    if not arcpy.Exists(fgdb):
        arcpy.CreateFileGDB_management(root, "fGDB.gdb")

    # Convert input polygon shp to a feature class inside the scratch GDB.
    arcpy.FeatureClassToGeodatabase_conversion(pgshp, fgdb)

    # Split the feature class by the unique field (pgsf) into shapefiles
    # written to the root folder.
    arcpy.AddMessage("Splitting polygon shapefile")
    arcpy.SplitByAttributes_analysis(infc, root, pgsf)

    # Delete the scratch file geodatabase.
    if arcpy.Exists(fgdb):
        arcpy.Delete_management(fgdb)

# Set Workspace to the input folder.
arcpy.env.workspace = root

# Enable overwriting of existing outputs.
arcpy.env.overwriteOutput = True

# Define variables related to tiff files in the input folder.
pattern = "prate_*.tif"  # Pattern that will be used to find & prepare a list of raster files
lTIFs = [
]  # Blank list populated with the matching geotiff files later (code truncated below)
# --- Example no. 8 (separator left over from the scraped source; score: 0) ---
    elif str == "NB":
        return 1
    elif str == "SB":
        return 0
    elif str == "CL":
        return 1
    elif str == "CC":
        return 0
    else:
        return -1'''

# Populate NewDir on the routes layer using the direction() code block above.
arcpy.CalculateField_management(
    routes_dir, 'NewDir', "direction(!DirName!)", "PYTHON3",
    dirRouteBlock)  # use code block above to cal the field
# SplitByAttributes writes one feature class per unique
# (RouteAbbr, NewDir) combination into the target GDB; the tool's
# Result[0] is the target workspace path, not the individual outputs.
split_gdb = arcpy.SplitByAttributes_analysis(
    routes_dir, os.path.join(root_dir, f'routes_split_{year}.gdb'),
    ["RouteAbbr", "NewDir"])[0]

replaceGDB(root_dir, f'stops_split_{year}.gdb')

# the same as above, for stops
# ! can stops be defined with multiple directions depending on the route?
# BUGFIX: Describe(...).fields holds Field objects, so membership must be
# tested against their .name values — the original compared the string
# 'NewDir' directly against Field objects and was always False, so the
# stale field was never deleted.
stops_fc = os.path.join(gdb, "MetroBusStopsByLine__200120")
if 'NewDir' in [f.name for f in arcpy.Describe(stops_fc).fields]:
    arcpy.DeleteField_management(stops_fc, 'NewDir')

dirBlock = '''def dirCalc(str):
    if "EAST" in str :
        return "0"
    elif "WEST" in str:
Exemplo n.º 9
0
def split_by_attributes(input_fc, workspace, split_fields):
    """Split *input_fc* into one feature class per unique combination of
    *split_fields* values, written into *workspace*.

    Parameters
    ----------
    input_fc : str
        Input table or feature class.
    workspace : str
        Target workspace (folder or geodatabase) for the outputs.
    split_fields : str | list
        Field name(s) whose unique value combinations define the split.

    Returns
    -------
    arcpy.Result
        The tool Result, so callers can inspect the output workspace
        (the original wrapper discarded it).
    """
    return arcpy.SplitByAttributes_analysis(
        Input_Table=input_fc,
        Target_Workspace=workspace,
        Split_Fields=split_fields,
    )
# Repair geometry of the transboundary intersect output, deleting null shapes.
arcpy.RepairGeometry_management("all_wdpa_polybuffpnt_transboundary_novelarea_intersect","DELETE_NULL","OGC")

#  recalculate ISO3 based on the geo iso3
arcpy.CalculateField_management("all_wdpa_polybuffpnt_transboundary_novelarea_intersect","WDPA_ISO3","!GEO_ISO3!","PYTHON_9.3")

# rename the nontransboundary sites
#arcpy.Rename_management(r"in_memory\all_wdpa_polybuffpnt_nontransboundary",r"in_memory\all_wdpa_polybuffpnt_national")

# append back the erased and intersected transboundary sites back into the nontransboundary sites
arcpy.Append_management(r"in_memory\all_wdpa_polybuffpnt_nontransboundary","all_wdpa_polybuffpnt_transboundary_novelarea_intersect","NO_TEST")

# repair it
arcpy.RepairGeometry_management(r"in_memory\all_wdpa_polybuffpnt_nontransboundary","DELETE_NULL","OGC")

# split by attribute (wdpa_iso3) to create an individual fc for each iso3
arcpy.SplitByAttributes_analysis(r"in_memory\all_wdpa_polybuffpnt_nontransboundary",sbafolder, "WDPA_ISO3")

# change the location of the workspace to represent the location of the sba output
arcpy.env.workspace = str(sbafolder)
arcpy.env.overwriteOutput = True

out_sba = arcpy.ListFeatureClasses()

#  split the input into country specific subsets and do the analysis iso3 by iso3
# NOTE(review): the loop body continues past this snippet (truncated below).
for fc in out_sba:
    desc = arcpy.Describe(fc)

    # run a union, add in an xyco for each segment
    # NOTE(review): output written as "Union" but repaired as "union" —
    # works on case-insensitive Windows paths; confirm before porting.
    arcpy.Union_analysis(fc,r"in_memory\Union")
    arcpy.RepairGeometry_management(r"in_memory\union","DELETE_NULL","OGC")