コード例 #1
0
ファイル: FIND_DM_Reports_v.5.py プロジェクト: memoore/FIND
def CreatePivotTable(inTable, outTable):
    '''Summarize (refcode, dm_stat) record counts, then pivot them.

    inTable  -- input table holding ``refcode`` and ``dm_stat`` fields
    outTable -- path for the intermediate summary-statistics table
    The pivoted result is written to ``pivotTable`` in the current workspace.
    '''

    # One summary row per (refcode, dm_stat) pair with its record count.
    arcpy.Statistics_analysis(inTable, outTable,
                              "refcode COUNT;dm_stat COUNT",
                              "refcode;dm_stat")

    # Spread dm_stat values into columns, keyed by refcode.
    pivot_output = os.path.join(env.workspace, "pivotTable")
    arcpy.PivotTable_management(outTable, 'refcode', 'dm_stat', 'FREQUENCY',
                                pivot_output)
コード例 #2
0
def CreatePivotTable(inTable, outTable):
    '''Build a pivot table of DM-status counts per refcode.

    inTable  -- input table containing ``refcode`` and ``dm_stat`` fields
    outTable -- path for the intermediate summary-statistics table

    The pivoted result is written to ``pivotTable`` inside the current
    ``env.workspace``.
    '''

    # creates statistics table counting number of records for each DM status for each refcode
    arcpy.Statistics_analysis(inTable, outTable, "refcode COUNT;dm_stat COUNT",
                              "refcode;dm_stat")

    # convert into pivot table: one row per refcode, one column per dm_stat value
    arcpy.PivotTable_management(outTable, 'refcode', 'dm_stat', 'FREQUENCY',
                                os.path.join(env.workspace, "pivotTable"))
コード例 #3
0
def zonal_attribution_of_polygon_data(zone_fc, zone_field, class_fc, out_table, class_field, rename_tag=''):
    """Tabulate the % of each zone covered by each class and save it to a table.

    zone_fc     -- zone polygon feature class
    zone_field  -- field identifying each zone
    class_fc    -- class polygon feature class to intersect with the zones
    out_table   -- output table path for the pivoted percentages
    class_field -- field in class_fc whose values become pivot columns
    rename_tag  -- optional metric tag; when given, pivot columns are renamed
                   to '<rename_tag>_<subgroup>_pct' using the lookup CSV
    """

    def rename_to_standard(table):
        # Rename pivot columns to standardized metric names looked up from
        # the provenance CSV that ships next to this script.
        arcpy.AddMessage("Renaming.")

        # look up the values based on the rename tag
        this_files_dir = os.path.dirname(os.path.abspath(__file__))
        os.chdir(this_files_dir)
        geo_file = os.path.abspath('../geo_metric_provenance.csv')
        with open(geo_file) as csv_file:
            reader = csv.DictReader(csv_file)
            mapping = {row['subgroup_original_code']: row['subgroup']
                       for row in reader if row['main_feature'] in rename_tag and row['main_feature']}
            arcpy.AddMessage(mapping)

        # update them
        for old, new in mapping.items():
            arcpy.AddMessage(new)
            old_fname = '{}'.format(old)
            new_fname = '{}_{}_pct'.format(rename_tag, new)
            if arcpy.ListFields(table, old_fname):
                try:
                    # same problem with AlterField limit of 31 characters here.
                    arcpy.AlterField_management(table, old_fname, new_fname, clear_field_alias=True)
                except Exception:
                    # was a bare except, which also swallowed SystemExit and
                    # KeyboardInterrupt; fall back to the manual rename helper
                    cu.rename_field(table, old_fname, new_fname, deleteOld=True)
        return table


    arcpy.env.workspace = 'in_memory'
    tab = arcpy.TabulateIntersection_analysis(zone_fc, zone_field, class_fc, 'tab', class_field)
    pivot = arcpy.PivotTable_management(tab, zone_field, class_field, "PERCENTAGE", 'pivot')
    renamed = rename_to_standard(pivot)
    arcpy.CopyRows_management(renamed, out_table)
    # 'renamed' is the same dataset as 'pivot' (rename_to_standard returns its
    # argument), so the original [tab, pivot, renamed] loop deleted the pivot
    # table twice; delete each intermediate exactly once.
    for item in [tab, pivot]:
        arcpy.Delete_management(item)
コード例 #4
0
def shallowWaterRef():
    """Compute the % of shallow (<= 20') water in EDT reaches and its WHI score.

    Reads inputs from the project ``config`` module, writes intermediates and
    the final table to ``config.temp_gdb``, logs via ``util.log``.  Takes no
    parameters and returns nothing.
    """
    util.log(
        "Starting shallowWaterRef module ---------------------------------------------------------------"
    )

    #dissolve EDT reaches into 1 polygon
    reach_diss = arcpy.Dissolve_management(config.EDT_reaches,
                                           r"in_memory" + r"\reach_diss", "#",
                                           "#", "MULTI_PART", "DISSOLVE_LINES")

    util.log("Clipping depth raster to EDT reach extent")
    arcpy.CheckOutExtension("Spatial")
    # "-3.402823e+038" is the NoData placeholder value passed to Clip.
    depth_clip = arcpy.Clip_management(config.river_depth, "#",
                                       config.temp_gdb + r"\depth_clip",
                                       reach_diss, "-3.402823e+038",
                                       "ClippingGeometry",
                                       "NO_MAINTAIN_EXTENT")

    util.log(
        "Converting depth raster to positive values and adjusting to ordinary low water mark"
    )
    depth_raster = arcpy.sa.Raster(depth_clip)
    # offset between the raster datum and ordinary low water -- presumably
    # feet, matching the 20' threshold below; TODO confirm units
    lowWater_conversion = 15
    raster_adj = abs(depth_raster) - lowWater_conversion

    # get rid of negative values
    raster_noNeg = arcpy.sa.SetNull(raster_adj < 0, raster_adj)

    # reclassify to above and below 20'
    util.log("Reclassifying to above and below 20' depth")
    # 0-20' set to 0, > 20' set to 1
    reclass_mapping = "0 20 0;20 200 1"
    raster_reclass = arcpy.sa.Reclassify(raster_noNeg, "Value",
                                         reclass_mapping, "DATA")

    #convert to polygon
    util.log("Conveting raster to polygon")
    shallow_vect = arcpy.RasterToPolygon_conversion(
        raster_reclass, config.temp_gdb + r"\shallow_vect")

    #summarize data: total polygon area per gridcode (0 = shallow, 1 = deep)
    util.log("Creating summary table")
    summary = arcpy.Statistics_analysis(
        shallow_vect, config.temp_gdb + r"\shallow_summary_table",
        "Shape_Area SUM", "gridcode")

    #pivot info
    util.log("Creating pivot table")
    # constant-valued input_field collapses the pivot to a single row
    arcpy.AddField_management(summary, "input_field", "SHORT")
    with arcpy.da.UpdateCursor(summary, "input_field") as rows:
        for row in rows:
            row[0] = 100
            rows.updateRow(row)
    ShallowWater_final = arcpy.PivotTable_management(
        summary, "input_field", "gridcode", "SUM_Shape_Area",
        config.temp_gdb + r"\ShallowWater_final")

    # calculate # of total
    util.log("Calc % shallow water")
    rate_field = "Pcnt_Shallow"
    arcpy.AddField_management(ShallowWater_final, rate_field, "Double")
    # gridcode0 = shallow area sum, gridcode1 = deep area sum
    cursor_fields = ["gridcode0", "gridcode1", rate_field]
    with arcpy.da.UpdateCursor(ShallowWater_final, cursor_fields) as rows:
        for row in rows:
            # NOTE(review): divides by the combined area -- raises
            # ZeroDivisionError if both sums are 0, and assumes both
            # gridcode columns exist in the pivot output; confirm upstream
            row[2] = (row[0] / (row[0] + row[1])) * 100
            rows.updateRow(row)

    # WHI score
    util.log("Calc WHI score")
    score_field = "shallow_water_score"
    arcpy.AddField_management(ShallowWater_final, score_field, "DOUBLE")
    with arcpy.da.UpdateCursor(ShallowWater_final,
                               [rate_field, score_field]) as rows:
        for row in rows:
            row[1] = calc.shallowWater_score(row[0])
            rows.updateRow(row)

    # convert output to table if needed
    util.tableTo_primaryOutput(ShallowWater_final)

    util.log("Cleaning up")
    arcpy.Delete_management("in_memory")

    util.log(
        "Module complete ---------------------------------------------------------------"
    )
コード例 #5
0
def main(fcLineNetwork,
         fieldStreamRouteID,
         fieldConfinement,
         fieldConstriction,
         strSeedDistance,
         inputliststrWindowSize,
         outputWorkspace,
         tempWorkspace=arcpy.env.scratchWorkspace):
    """Perform a Moving Window Analysis on a Line Network.

    Seed points are placed along each dissolved route at strSeedDistance
    intervals; around every seed, a window of each size in
    inputliststrWindowSize (a ';'-separated string of lengths) is cut from
    the route, intersected with the original network, and the proportion of
    confined/constricted length per window is pivoted back onto the seed
    points, which are copied to outputWorkspace along with the window lines.

    fcLineNetwork          -- input polyline network feature class
    fieldStreamRouteID     -- field identifying each route
    fieldConfinement       -- field with confinement flag (expects 0/1 values)
    fieldConstriction      -- field with constriction flag (expects 0/1 values)
    strSeedDistance        -- seed spacing (string, presumably map units)
    inputliststrWindowSize -- ';'-separated window lengths (strings)
    outputWorkspace        -- workspace receiving the final feature classes
    tempWorkspace          -- scratch workspace for intermediates
    """

    liststrWindowSize = inputliststrWindowSize.split(";")

    # Dissolve the network into one feature per route so positions can be
    # measured along a single geometry.
    fcLineNetworkDissolved = gis_tools.newGISDataset(
        tempWorkspace, "GNAT_MWA_LineNetworkDissolved")
    arcpy.Dissolve_management(fcLineNetwork,
                              fcLineNetworkDissolved,
                              fieldStreamRouteID,
                              multi_part=False,
                              unsplit_lines=True)

    listLineGeometries = arcpy.CopyFeatures_management(fcLineNetworkDissolved,
                                                       arcpy.Geometry())

    # Accumulators filled in the route loop, written out via insert cursors.
    listWindows = []       # [route, seed, window size, window end point]
    listSeeds = []         # [route, seed, seed point geometry]
    listWindowEvents = []  # [route, seed, window size, start, end] (not persisted)
    listgWindows = []      # [route, seed, window size, window line geometry]
    intSeedID = 0

    iRoutes = int(
        arcpy.GetCount_management(fcLineNetworkDissolved).getOutput(0))
    arcpy.SetProgressor("step", "Processing Each Route", 0, iRoutes, 1)
    iRoute = 0
    with arcpy.da.SearchCursor(
            fcLineNetworkDissolved,
        ["SHAPE@", fieldStreamRouteID, "SHAPE@LENGTH"]) as scLines:
        for fLine in scLines:  #Loop Through Routes
            arcpy.SetProgressorLabel("Route: " + str(iRoute) +
                                     " Seed Point: " + str(intSeedID))
            arcpy.SetProgressorPosition(iRoute)
            gLine = fLine[0]
            # NOTE(review): max() over the *string* window sizes compares
            # lexicographically (e.g. "50" > "1000"), not numerically --
            # confirm whether sizes can differ in digit count
            dblSeedPointPosition = float(
                max(liststrWindowSize
                    )) / 2  #Start Seeds at position of largest window
            # Walk seeds along the route until the largest window would
            # overrun the route end (fLine[2] is SHAPE@LENGTH).
            while dblSeedPointPosition + float(
                    max(liststrWindowSize)) / 2 < fLine[2]:
                arcpy.SetProgressorLabel("Route: " + str(iRoute) +
                                         " Seed Point: " + str(intSeedID))
                gSeedPointPosition = gLine.positionAlongLine(
                    dblSeedPointPosition)
                listSeeds.append([
                    scLines[1], intSeedID, gSeedPointPosition
                ])  #gSeedPointPosition.X,gSeedPointPosition.Y])
                for strWindowSize in liststrWindowSize:
                    dblWindowSize = float(strWindowSize)
                    dblLengthStart = dblSeedPointPosition - dblWindowSize / 2
                    dblLengthEnd = dblSeedPointPosition + dblWindowSize / 2

                    gPointStartLocation = gLine.positionAlongLine(
                        dblLengthStart)
                    gPointEndLocation = gLine.positionAlongLine(dblLengthEnd)
                    gTemp = arcpy.Geometry()
                    # Split the route at both window ends; the middle piece
                    # is the window line.
                    listgWindowTemp = arcpy.SplitLineAtPoint_management(
                        gLine, [gPointStartLocation, gPointEndLocation], gTemp,
                        "1 METER")
                    #TODO: Need a better method to select the line here!!
                    # Heuristic: keep pieces whose length is within 10 units
                    # of the requested window size.
                    for gWindowTemp in listgWindowTemp:
                        if abs(gWindowTemp.length - dblWindowSize) < 10:
                            listgWindows.append([
                                scLines[1], intSeedID, dblWindowSize,
                                gWindowTemp
                            ])
                    # End TODO
                    listWindows.append([
                        scLines[1], intSeedID, dblWindowSize,
                        gPointStartLocation
                    ])
                    listWindows.append([
                        scLines[1], intSeedID, dblWindowSize, gPointEndLocation
                    ])
                    listWindowEvents.append([
                        scLines[1], intSeedID, dblWindowSize, dblLengthStart,
                        dblLengthEnd
                    ])
                dblSeedPointPosition = dblSeedPointPosition + float(
                    strSeedDistance)
                intSeedID = intSeedID + 1
            iRoute = iRoute + 1

    # Create the scratch feature classes that receive the accumulated
    # seeds, window end points, and window lines.
    fcSeedPoints = gis_tools.newGISDataset(tempWorkspace,
                                           "GNAT_MWA_SeedPoints")
    fcWindowEndPoints = gis_tools.newGISDataset(tempWorkspace,
                                                "GNAT_MWA_WindowEndPoints")
    fcWindowLines = gis_tools.newGISDataset(tempWorkspace,
                                            "GNAT_MWA_WindowLines")

    arcpy.CreateFeatureclass_management(tempWorkspace,
                                        "GNAT_MWA_SeedPoints",
                                        "POINT",
                                        spatial_reference=fcLineNetwork)
    arcpy.CreateFeatureclass_management(tempWorkspace,
                                        "GNAT_MWA_WindowEndPoints",
                                        "POINT",
                                        spatial_reference=fcLineNetwork)
    arcpy.CreateFeatureclass_management(tempWorkspace,
                                        "GNAT_MWA_WindowLines",
                                        "POLYLINE",
                                        spatial_reference=fcLineNetwork)

    gis_tools.resetField(fcSeedPoints, "RouteID", "LONG")
    gis_tools.resetField(fcSeedPoints, "SeedID", "LONG")

    gis_tools.resetField(fcWindowEndPoints, "RouteID", "LONG")
    gis_tools.resetField(fcWindowEndPoints, "SeedID", "LONG")
    gis_tools.resetField(fcWindowEndPoints, "Seg", "DOUBLE")

    gis_tools.resetField(fcWindowLines, "RouteID", "LONG")
    gis_tools.resetField(fcWindowLines, "SeedID", "LONG")
    gis_tools.resetField(fcWindowLines, "Seg", "DOUBLE")

    with arcpy.da.InsertCursor(
            fcSeedPoints, ["RouteID", "SeedID", "SHAPE@XY"]) as icSeedPoints:
        for row in listSeeds:
            icSeedPoints.insertRow(row)

    with arcpy.da.InsertCursor(
            fcWindowEndPoints,
        ["RouteID", "SeedID", "Seg", "SHAPE@XY"]) as icWindowEndPoints:
        for row in listWindows:
            icWindowEndPoints.insertRow(row)

    with arcpy.da.InsertCursor(
            fcWindowLines,
        ["RouteID", "SeedID", "Seg", "SHAPE@"]) as icWindowLines:
        for row in listgWindows:
            icWindowLines.insertRow(row)

    # Intersect window lines with the original network so each window piece
    # carries the network's confinement/constriction attributes.
    fcIntersected = gis_tools.newGISDataset(
        tempWorkspace, "GNAT_MWA_IntersectWindowAttributes")
    arcpy.Intersect_analysis([fcWindowLines, fcLineNetwork],
                             fcIntersected,
                             "ALL",
                             output_type="LINE")

    # Confinement: sum intersected length per (route, seed, window size,
    # confinement value), then pivot confinement values into columns.
    tblSummaryStatisticsConfinement = gis_tools.newGISDataset(
        tempWorkspace, "GNAT_MWA_SummaryStatsTableConfinement")
    arcpy.Statistics_analysis(
        fcIntersected, tblSummaryStatisticsConfinement, "Shape_Length SUM",
        fieldStreamRouteID + ";SeedID;Seg;" + fieldConfinement)

    tblSummaryStatisticsPivot = gis_tools.newGISDataset(
        tempWorkspace, "GNAT_MWA_SummaryStatisticsPivotTable")
    # NOTE(review): the pivot key is hard-coded as "Route;SeedID;Seg" while
    # later calls use fieldStreamRouteID -- this breaks if the route field
    # is not literally named "Route"; confirm against callers
    arcpy.PivotTable_management(tblSummaryStatisticsConfinement,
                                "Route;SeedID;Seg", fieldConfinement,
                                "SUM_Shape_Length", tblSummaryStatisticsPivot)

    fieldConfinementValue = gis_tools.resetField(tblSummaryStatisticsPivot,
                                                 "CONF_Value", "DOUBLE")

    # Ensure both pivot columns (<field>0 / <field>1) exist before the ratio
    # calculation, since PivotTable only creates columns for observed values.
    if len(arcpy.ListFields(tblSummaryStatisticsPivot,
                            fieldConfinement + "1")) == 0:
        arcpy.AddField_management(tblSummaryStatisticsPivot,
                                  fieldConfinement + "1", "DOUBLE")
    if len(arcpy.ListFields(tblSummaryStatisticsPivot,
                            fieldConfinement + "0")) == 0:
        arcpy.AddField_management(tblSummaryStatisticsPivot,
                                  fieldConfinement + "0", "DOUBLE")

    # Confinement ratio = confined length / total length per window.
    arcpy.CalculateField_management(
        tblSummaryStatisticsPivot, fieldConfinementValue,
        "!" + fieldConfinement + "1!/(!" + fieldConfinement + "0! + !" +
        fieldConfinement + "1!)", "PYTHON")

    #Pivot Confinement on Segment Size
    tblSummaryStatisticsWindowPivot = gis_tools.newGISDataset(
        tempWorkspace, "GNAT_MWA_SummaryStatisticsWindowPivotTable")
    arcpy.PivotTable_management(tblSummaryStatisticsPivot,
                                fieldStreamRouteID + ";SeedID", "Seg",
                                fieldConfinementValue,
                                tblSummaryStatisticsWindowPivot)

    # Constriction: same summarize/pivot/ratio/pivot sequence as confinement.

    tblSummaryStatisticsConstriction = gis_tools.newGISDataset(
        tempWorkspace, "GNAT_MWA_SummaryStatsTableConstriction")
    arcpy.Statistics_analysis(
        fcIntersected, tblSummaryStatisticsConstriction, "Shape_Length SUM",
        fieldStreamRouteID + ";SeedID;Seg;" + fieldConstriction)

    tblSummaryStatisticsPivotConstriction = gis_tools.newGISDataset(
        tempWorkspace, "GNAT_MWA_SummaryStatisticsPivotTableConsriction")
    # NOTE(review): same hard-coded "Route;SeedID;Seg" key as above -- confirm
    arcpy.PivotTable_management(tblSummaryStatisticsConstriction,
                                "Route;SeedID;Seg", fieldConstriction,
                                "SUM_Shape_Length",
                                tblSummaryStatisticsPivotConstriction)

    fieldConstrictionValue = gis_tools.resetField(
        tblSummaryStatisticsPivotConstriction, "CNST_Value", "DOUBLE")
    # NOTE(review): unlike the confinement branch, these existence checks and
    # AddField calls target the summary-statistics table, yet CalculateField
    # below reads the fields from the *pivot* table -- looks inconsistent;
    # confirm the intended target table
    if len(
            arcpy.ListFields(tblSummaryStatisticsConstriction,
                             fieldConstriction + "1")) == 0:
        arcpy.AddField_management(tblSummaryStatisticsConstriction,
                                  fieldConstriction + "1", "DOUBLE")
    if len(
            arcpy.ListFields(tblSummaryStatisticsConstriction,
                             fieldConstriction + "0")) == 0:
        arcpy.AddField_management(tblSummaryStatisticsConstriction,
                                  fieldConstriction + "0", "DOUBLE")

    arcpy.CalculateField_management(
        tblSummaryStatisticsPivotConstriction, fieldConstrictionValue,
        "!" + fieldConstriction + "1!/(!" + fieldConstriction + "0! + !" +
        fieldConstriction + "1!)", "PYTHON")
    tblSummaryStatisticsWindowPivotConstriction = gis_tools.newGISDataset(
        tempWorkspace,
        "GNAT_MWA_SummaryStatisticsWindowPivotTableConstriction")
    arcpy.PivotTable_management(tblSummaryStatisticsPivotConstriction,
                                fieldStreamRouteID + ";SeedID", "Seg",
                                fieldConstrictionValue,
                                tblSummaryStatisticsWindowPivotConstriction)

    # Build the "Seg<size>" field list produced by the window-size pivots.
    strWindowSizeFields = ""
    for WindowSize in liststrWindowSize:
        strWindowSizeFields = strWindowSizeFields + ";Seg" + WindowSize
    strWindowSizeFields = strWindowSizeFields.lstrip(";")

    #Join Above table to seed points
    arcpy.JoinField_management(fcSeedPoints, "SeedID",
                               tblSummaryStatisticsWindowPivot, "SeedID",
                               strWindowSizeFields)
    arcpy.JoinField_management(fcSeedPoints, "SeedID",
                               tblSummaryStatisticsWindowPivotConstriction,
                               "SeedID", strWindowSizeFields)

    # Manage Outputs
    fcOutputSeedPoints = gis_tools.newGISDataset(outputWorkspace,
                                                 "MovingWindowSeedPoints")
    arcpy.CopyFeatures_management(fcSeedPoints, fcOutputSeedPoints)

    fcOutputWindows = gis_tools.newGISDataset(outputWorkspace,
                                              "MovingWindowSegments")
    arcpy.CopyFeatures_management(fcWindowLines, fcOutputWindows)

    return
コード例 #6
0
ファイル: Pivot.py プロジェクト: Travis-Volpe/ArcPy_Projects
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# Pivot.py -- pivot the "2018 Watering Trees" layer by Location and Year.
# Created on: 2018-05-18 11:31:22.00000
#   (originally generated by ArcGIS/ModelBuilder)
# ---------------------------------------------------------------------------

# Set the necessary product code
# import arcinfo

import arcpy

# Input layer name and destination CSV for the pivoted table.
input_layer = "2018 Watering Trees"
output_csv = "D:\\WateringMaps\\Models\\Excel_Outputs\\Test_Pivot_2.csv"

# One row per Location, one column per Year, cell values from Jurisdictn.
arcpy.PivotTable_management(input_layer, "Location", "Year",
                            "Jurisdictn", output_csv)
コード例 #7
0
                cursor.updateRow(row)
            if row[1] == "survey_poly":
                row[1] = "Survey Sites"
                cursor.updateRow(row)

    with arcpy.da.UpdateCursor(elementRecords, "created_date") as cursor:
        for row in cursor:
            if row[0] > datetime.datetime(int(year), int(q), 01, 0, 0, 0, 0):
                cursor.deleteRow()

    summaryTable = arcpy.Statistics_analysis(
        elementRecords, "summaryTable", "Feature_Class COUNT;dm_stat COUNT",
        "Feature_Class;dm_stat")

    pivotTable = arcpy.PivotTable_management(summaryTable, 'Feature_Class',
                                             'dm_stat', 'FREQUENCY',
                                             "pivotTable")

    arcpy.AddField_management(pivotTable, "Total", "LONG", "", "", 8, "Total")
    arcpy.CalculateField_management(
        pivotTable, "Total",
        "!DM_Pending! + !DM_Processed! + !Ready_for_DM! + !Draft! + !Ready_for_ID_Review!",
        "PYTHON_9.3")

    # export table as Excel file to produce final report
    filename = "FIND Quarterly Report " + time.strftime("%d%b%y") + ".xls"
    outTable = os.path.join(reportPath, filename)
    arcpy.TableToExcel_conversion(pivotTable, outTable)
    print "DCNR FIND Quarterly Report Created!"

コード例 #8
0
# NOTE(review): this chunk references names not defined in the visible
# portion (NYS_Civil_Boundaries_shp, FIA_DATA, FIADATA_PivotTable,
# Output_LAS_Dataset) -- presumably assigned earlier in the full
# ModelBuilder-generated script; confirm before running standalone.
FIA_Data_sde = "C:\\Users\\rlennon\\Downloads\\NYS_Civil_Boundaries.shp\\FIA Data.sde"
Output_Report_File = ""
Output_Results_Table = ""
output = "C:\\Users\\rlennon\\Desktop\\output"

# Process: Create Database Connection (password masked by the exporter)
arcpy.CreateDatabaseConnection_management(NYS_Civil_Boundaries_shp, "FIA Data",
                                          "SQL_SERVER", "FIA", "DATABASE_AUTH",
                                          "", "*****", "SAVE_USERNAME", "", "",
                                          "TRANSACTIONAL", "", "")

# Process: Create Table
arcpy.CreateTable_management(FIA_Data_sde, "FIA DATA", "", "")

# Process: Pivot Table
# NOTE(review): empty field arguments -- the model's parameters were not
# filled in when this script was exported
arcpy.PivotTable_management(FIA_DATA, "", "", "", FIADATA_PivotTable)

# Process: Add Join
arcpy.AddJoin_management("", "", FIADATA_PivotTable, "", "KEEP_ALL")

# Process: Create Feature Dataset
arcpy.CreateFeatureDataset_management("", "", "")

# Process: Exploratory Regression
arcpy.ExploratoryRegression_stats("", "", "", "", Output_Report_File,
                                  Output_Results_Table, "5", "1", "0.5",
                                  "0.05", "7.5", "0.1", "0.1")

# Process: Create LAS Dataset
arcpy.CreateLasDataset_management("", Output_LAS_Dataset, "NO_RECURSION", "",
                                  "", "NO_COMPUTE_STATS", "ABSOLUTE_PATHS")
コード例 #9
0
    # Process: Add Field 
    arcpy.AddField_management(out_summarized_dbf, "LandUse", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")

    # Process: Calculate Field 
    arcpy.CalculateField_management(out_summarized_dbf, "LandUse", "Reclass(!Value!, !LandUse!)", "PYTHON", "def Reclass(Value, LandUse):\\n    if  Value == 1:\\n        return 'Water_WL'\\n    elif  Value == 2:\\n        return 'Forest'\\n    elif Value == 3:\\n        return 'Grass'\\n    elif Value == 4:\\n        return 'AgrLand'\\n    elif Value == 5:\\n      return 'URS'")

    # Process: Add Field
    arcpy.AddField_management(out_summarized_dbf, "PivotID", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")

    # Process: Calculate Field
    arcpy.CalculateField_management(out_summarized_dbf, "PivotID", "1", "PYTHON_9.3", "")

    # Process: Pivot Table
    out_pivot_table = Summarized_Stat_folder + '\\' + s_name + '_pivot.dbf'
    arcpy.PivotTable_management(out_summarized_dbf, "PivotID", "LandUse", "Count", out_pivot_table)

    LU_lst = ["AgrLand", "URS", "Grass", "Forest", "Water_WL"]
    for landuse in LU_lst:
        pivot_column = arcpy.ListFields(out_pivot_table, landuse)
        if len(pivot_column) != 1:
            arcpy.AddField_management(out_pivot_table, landuse, "DOUBLE", "", "", "", "", "NON_NULLABLE", "NON_REQUIRED", "")
            arcpy.CalculateField_management(out_pivot_table, landuse, "0", "PYTHON_9.3", "")

    # Process: Summary Statistics
    out_for_combine_dbf = Summarized_Stat_folder + '\\' + s_name + '_forCombine.dbf'
    arcpy.Statistics_analysis(out_pivot_table, out_for_combine_dbf, "AgrLand MAX;Forest MAX;Grass MAX;URS MAX;Water_WL MAX", "OID")

    # Process: Add Field
    arcpy.AddField_management(out_for_combine_dbf, "LYear", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
コード例 #10
0
                           "UNIQUE")
 arcpy.JoinField_management(tempDz, "UNIQUE", tempSumTbl, "UNIQUE",
                            "SUM_TOTPOP_CY")
 arcpy.MakeFeatureLayer_management(tempDz, tempLyrDz)
 arcpy.SelectLayerByAttribute_management(tempLyrDz, "NEW_SELECTION",
                                         "\"SUM_TOTPOP_CY\" IS NULL")
 arcpy.CalculateField_management(tempLyrDz, "SUM_TOTPOP_CY", 0)
 arcpy.AddField_management(tempDz, "NAME", "TEXT", "", "", 30)
 arcpy.AddField_management(tempDz, "CLASS", "TEXT", "", "", 20)
 arcpy.CalculateField_management(tempDz, "NAME",
                                 '!UNIQUE!.partition("_") [0]',
                                 "PYTHON_9.3")
 arcpy.CalculateField_management(tempDz, "CLASS",
                                 '!UNIQUE!.partition("_") [2]',
                                 "PYTHON_9.3")
 arcpy.PivotTable_management(tempDz, "NAME", "CLASS", "SUM_TOTPOP_CY",
                             scratchGDB + "tbl_pivot_" + fc)
 arcpy.Project_management(fc, finalGDB + fc, outCS,
                          "WGS_1984_(ITRF00)_To_NAD_1983")
 arcpy.JoinField_management(finalGDB + fc, "NAME",
                            scratchGDB + "tbl_pivot_" + fc, "NAME", [
                                "Advanced_TOTPOP", "Basic_TOTPOP",
                                "Moderate_TOTPOP", "Standard_TOTPOP"
                            ])
 #Select Null Values in the TOTPOP Fields and Change to Zero
 arcpy.MakeFeatureLayer_management(finalGDB + fc, tempLyrFinal)
 arcpy.SelectLayerByAttribute_management(tempLyrFinal, "NEW_SELECTION",
                                         "\"Advanced_TOTPOP\" IS NULL")
 arcpy.CalculateField_management(tempLyrFinal, "Advanced_TOTPOP", 0)
 arcpy.SelectLayerByAttribute_management(tempLyrFinal, "NEW_SELECTION",
                                         "\"Standard_TOTPOP\" IS NULL")
 arcpy.CalculateField_management(tempLyrFinal, "Standard_TOTPOP", 0)
コード例 #11
0
                          field_length=20)
arcpy.CalculateField_management(infile, 'ParamSht', expression, 'PYTHON_9.3',
                                codeblock)

arcpy.TableSelect_analysis(infile, temptb, '"ParamSht" IS NOT NULL')

arcarray = arcpy.da.TableToNumPyArray(temptb, 'ParamSht')
parameters = arcarray['ParamSht']
fields = np.unique(parameters)
fields = fields.tolist()
fields.append('SampleId')
# Set a variable to store value field name
valueFieldName = "ResultValue"
out = arcpy.GetParameterAsText(1)

arcpy.PivotTable_management(temptb, 'SampleId', 'ParamSht', 'Value_edit',
                            temptb1)
arr = arcpy.da.TableToNumPyArray(temptb1, fields, null_value='')
arcpy.Delete_management(temptb)

nosamp = len(arr['Ca'])
Cl = arr['Cl']
Mg = arr['Mg']
K = arr['K']
Ca = arr['Ca']
Na = arr['Na']
HCO3 = arr['HCO3']
CO3 = arr['CO3']
NaK = arr['NaK']
SO4 = arr['SO4']
NO3 = arr['NO3']
コード例 #12
0
ファイル: Clustering.py プロジェクト: LEONOB2014/NetworkGT
def main(infc, outfc, units, dissolve, connected, split):
    try:
        del_files = ["in_memory\\cluster", "in_memory\\cluster_diss"]

        #Parameter check
        if dissolve == 'true' or connected == 'true' or split == 'true':
            curfields = [f.name for f in arcpy.ListFields(infc)]
            if 'Connection' not in curfields:
                arcpy.AddError(
                    "Run Branches and Nodes tool prior to clustering or uncheck all options"
                )
                sys.exit()
        if connected == 'true':
            arcpy.MakeFeatureLayer_management(infc, "in_memory\\layer")
            arcpy.SelectLayerByAttribute_management(
                "in_memory\\layer", "NEW_SELECTION",
                "Connection = 'C - C' OR Connection = 'C - I' OR Connection = 'C - U'"
            )
            infc = "in_memory\\layer"

        arcpy.env.overwriteOutput = True
        arcpy.AddMessage('Clustering Network')
        arcpy.Buffer_analysis(infc, "in_memory\\cluster", "0.01 Meters",
                              "FULL", "ROUND", "NONE")
        curfields = [f.name for f in arcpy.ListFields(infc)]

        if split == 'true':
            arcpy.Dissolve_management("in_memory\\cluster",
                                      "in_memory\\cluster_diss", "Sample_No_",
                                      "", "SINGLE_PART")
            fields = ["Length", "Shape@", 'Sample_No_', 'Sample_No1']
        else:
            arcpy.Dissolve_management("in_memory\\cluster",
                                      "in_memory\\cluster_diss", "", "",
                                      "SINGLE_PART")
            fields = ["Length", "Shape@"]

        curfields2 = [
            f.name for f in arcpy.ListFields("in_memory\\cluster_diss")
        ]

        if 'Cluster' not in curfields2:
            arcpy.AddField_management("in_memory\\cluster_diss", "Cluster",
                                      "LONG")

        if 'Length' not in curfields2:
            arcpy.AddField_management("in_memory\\cluster_diss", "Length",
                                      "DOUBLE")

        with arcpy.da.UpdateCursor("in_memory\\cluster_diss",
                                   ["Cluster"]) as cursor:
            for enum, row in enumerate(cursor):
                row[0] = enum
                cursor.updateRow(row)

        if dissolve == 'true':
            del_files.append("in_memory\\cluster_sj")
            sj = "in_memory\\cluster_sj"
        else:
            sj = outfc

        arcpy.SpatialJoin_analysis(infc, "in_memory\\cluster_diss", sj,
                                   "JOIN_ONE_TO_MANY")
        with arcpy.da.UpdateCursor(sj, fields) as cursor:
            for row in cursor:
                if split == 'true':
                    if row[2] != row[3]:
                        cursor.deleteRow()
                        continue
                row[0] = row[1].getLength('PLANAR', units)
                cursor.updateRow(row)

        if dissolve == 'true':
            arcpy.Dissolve_management(sj, outfc, ['Cluster', 'Sample_No_'],
                                      [['Length', 'Min'], ['Length', 'Mean'],
                                       ['Length', 'Max'], ['Length', 'Count']],
                                      "MULTI_PART")

            pvt_table = os.path.join('in_memory',
                                     'pvt_' + os.path.basename(outfc))

            del_files.append(pvt_table)

            arcpy.PivotTable_management(sj, ['Cluster', 'Sample_No_'],
                                        'Connection', 'Length', pvt_table)
            fields = [f.name for f in arcpy.ListFields(pvt_table)[3:]]
            curfields = [f.name for f in arcpy.ListFields(outfc)]

            data = {}

            arcpy.AddMessage('Creating Fields')
            update_fields = []
            data_names = ['MIN', 'MEAN', 'MAX', 'SUM', 'COUNT']
            for enum, fname in enumerate(fields):
                for s in data_names:
                    alias = fname.replace('___', ' - ')
                    name_a = '%s ' % (alias) + s
                    name = '%s_' % (fname) + s[:4]
                    if name not in curfields:
                        arcpy.AddField_management(outfc, name, 'DOUBLE', "",
                                                  "", "", name_a)
                    update_fields.append(name)

            data = {}
            fields.extend(['Cluster', 'Sample_No_'])
            update_fields.extend(['Cluster', 'Sample_No_'])
            arcpy.AddMessage('Reading Data')
            with arcpy.da.SearchCursor(pvt_table, fields) as cursor:
                for row in cursor:
                    ID = (row[-1], row[-2])
                    if ID not in data:
                        values = {}
                        for enum, field in enumerate(fields[:-2]):
                            values[field] = []
                            if row[enum] > 0:
                                values[field] += [row[enum]]
                        data[ID] = values
                    else:
                        values = data[ID]
                        for enum, field in enumerate(fields[:-2]):
                            if row[enum] > 0:
                                values[field] += [row[enum]]

                        data[ID] = values

            arcpy.AddMessage('Updating Feature Class')
            with arcpy.da.UpdateCursor(outfc, update_fields) as cursor:
                for row in cursor:
                    try:
                        C = 0

                        ID = (row[-2], row[-1])
                        values = data[ID]

                        for field in fields[:-2]:
                            v = values[field]
                            if v:
                                row[C] = min(v)
                                C += 1
                                row[C] = numpy.mean(v)
                                C += 1
                                row[C] = max(v)
                                C += 1
                                row[C] = sum(v)
                                C += 1
                                row[C] = len(v)
                                C += 1

                            else:
                                for n in range(5):
                                    row[C] = 0
                                    C += 1

                        cursor.updateRow(row)
                    except Exception, e:
                        arcpy.AddMessage('%s' % (e))
                        continue

        else:
コード例 #13
0
    print('{0}, {1}'.format(row[0], row[1]))

################################################################################
#Part 4: For each fire find the average distance to all other fires

#Method 1 use point distance analyses
# Pairwise distances: same layer is used as both input and near features, so
# each fire point is measured against every other; empty search_radius = no limit.
arcpy.PointDistance_analysis(in_features="SA_Fire_Pts",
    near_features="SA_Fire_Pts",
    out_table="C:/SA_Fires/Tabular/SA_Fire_Pts_PointDistance", # CHANGE
    search_radius="")

# Create pivot table if singular value wanted
PtDist_a = "SA_Fire_Pts_PointDistance"
# NOTE(review): path lacks a slash after the drive letter ("C:SA_Fires/..."),
# and PivotTable_management writes a geodatabase/dBASE table, not a CSV —
# verify this output path before running.
Pivot_table = "C:SA_Fires/Tabular/PtDist_Pivot.csv"

arcpy.PivotTable_management(PtDist_a, "INPUT_FID", "DISTANCE", Pivot_table)

#Method 2: Use Numpy to calculate a distance matrix


################################################################################
# Part 5: Find Fires that are within 5km of a border (near analysis and search or buffer and select)
# NA than select feature where NDist <= 5km
# Near_analysis (in_features, near_features, {search_radius}, {location}, {angle}, {method})

# Convert the continent polygons to lines so Near can measure distance to a border.
arcpy.PolygonToLine_management(in_features="SouthAmerica",
    out_feature_class="C:SA_Fires/SHP/South_America_Line.shp",
    neighbor_option="IDENTIFY_NEIGHBORS")

# NOTE(review): this call is truncated in this excerpt — the remaining
# arguments and closing parenthesis continue beyond this view.
arcpy.Near_analysis(in_features="SA_Fire_Pts",
    near_features="South_America_Line",
コード例 #14
0
# change the 'type' field in the non_abnj selection so that ABNJ is always changed to 'EEZ' (nationally designated sites go over into the geographic ABNJ)
arcpy.CalculateField_management(r"in_memory\all_wdpa_polybuffpnt_union_flat_intersect_project_nonabnj","type","!type!.replace('ABNJ','EEZ')", 'PYTHON3')

# run some summary stats on the Land + EEZ selection for the current year (current) and broken down per year (temporal)
arcpy.Statistics_analysis(r"in_memory\all_wdpa_polybuffpnt_union_flat_intersect_project_nonabnj","global_summary_statistics_current",[["AREA_GEO","SUM"]],"type")
arcpy.Statistics_analysis(r"in_memory\all_wdpa_polybuffpnt_union_flat_intersect_project_nonabnj","global_summary_statistics_temporal",[["AREA_GEO","SUM"]],["type", "STATUS_YR"])

# select out just the rows with an ISO3 of 'ABNJ'
arcpy.Select_analysis("all_wdpa_polybuffpnt_union_flat_intersect_project",r"in_memory\ABNJ_sites","WDPA_ISO3 = 'ABNJ'")

# run some global summary stats on the ABNJ selection for the current year (current) and broken down per year (temporal)
arcpy.Statistics_analysis(r"in_memory\ABNJ_sites",r"in_memory\abnj_global_summary_statistics_current",[["AREA_GEO","SUM"]],"type")
arcpy.Statistics_analysis(r"in_memory\ABNJ_sites",r"in_memory\abnj_global_summary_statistics_temporal",[["AREA_GEO","SUM"]],["type", "STATUS_YR"])

# pivot the global current, global temporal summary table and the abnj temporal summary tables
arcpy.PivotTable_management("global_summary_statistics_temporal",["STATUS_YR"],"type","SUM_AREA_GEO","global_summary_statistics_temporal_pivot")
arcpy.PivotTable_management(r"in_memory\abnj_global_summary_statistics_temporal",["STATUS_YR"],"type","SUM_AREA_GEO","abnj_summary_statistics_temporal_pivot")

# add the abnj tables into the global summary tables
arcpy.Append_management(r"in_memory\abnj_global_summary_statistics_current","global_summary_statistics_current","NO_TEST")
arcpy.JoinField_management("global_summary_statistics_temporal_pivot","STATUS_YR","abnj_summary_statistics_temporal_pivot","STATUS_YR", 'ABNJ')

# update the fields so that they show '0' as opposed to blank cells
# define the codeblock1
# Field-calculator code block: replaces NULL cells with the string '0'.
# NOTE(review): the `value == None` inside the block is executed by the field
# calculator, not this script; `value is None` would be more idiomatic.

in_codeblock1 = """
def updateValue(value):
  if value == None:
   return '0'
  else: return value"""
コード例 #15
0
def streamAccess():
    """Compute Watershed Health Index (WHI) stream-access scores.

    Workflow:
      1. Clip the access polygons and intersect the city streams with the
         altered subwatershed geometry.
      2. Tag each accessible stream segment with a ``streamAccess_Status``
         value derived from its current (Curr_Acc) and historic (Hist_Acc)
         accessibility codes.
      3. Summarize stream length per watershed (total and per status), pivot
         the per-status summary, and compute the percent of stream length
         that is fully / partially accessible.
      4. Convert the percentages to WHI scores via
         ``calc.streamAccess1_count`` / ``calc.streamAccess2_count`` and hand
         the result to ``util.tableTo_primaryOutput``.

    Side effects only (intermediates in ``config.temp_gdb``; ``in_memory`` is
    cleared at the end).  Returns ``None``.
    """

    def _pct_of_total(length, total_length):
        """Return *length* as a percent of *total_length*.

        Pivot cells for statuses absent from a watershed come back NULL
        (``None``), and a watershed could conceivably have zero total length;
        both are treated as 0 instead of raising TypeError/ZeroDivisionError
        (which the previous inline arithmetic did).
        """
        if not total_length:
            return 0
        return ((length or 0) / float(total_length)) * 100

    util.log(
        "Starting streamAccess module ---------------------------------------------------------------"
    )
    # note - module uses an altered version of the subwatershed geometry per Jen Antak
    # - this allows for capturing the full length of the stream whereas the confluences were being cut off previously

    # clip access polygons to subwatersheds
    util.log("Clipping access polygons to subwatersheds")
    accesspoly_clip = arcpy.Clip_analysis(
        config.stream_access_poly, config.subwatersheds_StreamAccess_alt,
        config.temp_gdb + r"\accesspoly_clip")

    # intersect city streams with subwatersheds
    util.log("Intersecting streams with subwatersheds")
    in_features = [config.streams, config.subwatersheds_StreamAccess_alt]
    streams_sect = arcpy.Intersect_analysis(in_features,
                                            config.temp_gdb + r"\streams_sect",
                                            "NO_FID")

    # TODO: redundancy in this area here - stream_sect and accesspoly_sect are the same thing
    # intersect city streams with accessibility polygons
    util.log("Intersecting city streams with accessibility polygons")
    in_features = [streams_sect, accesspoly_clip]
    accesspoly_sect = arcpy.Intersect_analysis(
        in_features, config.temp_gdb + r"\accesspoly_sect", "NO_FID", "#",
        "LINE")

    # intersect accessible streams with subwatersheds
    util.log("Intersecting stream accessibility with subwatersheds")
    in_features = [accesspoly_sect, config.subwatersheds_StreamAccess_alt]
    access_sect = arcpy.Intersect_analysis(in_features,
                                           config.temp_gdb + r"\access_sect",
                                           "NO_FID")

    # add field to define access values which more explicitly align with output
    util.log("Adding streamAccess_Status field")
    arcpy.AddField_management(access_sect, "streamAccess_Status", "TEXT", "",
                              "", 20)

    # populate streamAccess_Status field values
    util.log("Populating streamAccess_Status field")
    field_list = ["Curr_Acc", "Hist_Acc", "streamAccess_Status"]
    with arcpy.da.UpdateCursor(access_sect, field_list) as rows:
        for row in rows:
            # NULL and "n" current-access codes are treated identically.
            if row[0] in (None, "n") and row[1] == "n":
                row[2] = "Hist_Innacessible"
            elif row[0] == "n" and row[1] == "y":
                row[2] = "Hist_Accessible"
            elif row[0] == "p":
                row[2] = "Curr_Partial"
            elif row[0] == "y":
                row[2] = "Curr_Full"
            else:
                row[2] = "Unknown"
            rows.updateRow(row)

    # summarize data
    util.log("Creating summary table for all streams")
    streams_summary = arcpy.Statistics_analysis(
        streams_sect, config.temp_gdb + r"\streams_summary_table",
        "Shape_Length SUM", "WATERSHED")
    arcpy.AddField_management(streams_summary, "WSHED_TOTAL_LEN", "DOUBLE")

    # copy the summed length into a stable field name so the later JoinField
    # does not collide with the access summary's own SUM_Shape_Length field
    input_fields = ["SUM_Shape_Length", "WSHED_TOTAL_LEN"]
    with arcpy.da.UpdateCursor(streams_summary, input_fields) as rows:
        for row in rows:
            row[1] = row[0]
            rows.updateRow(row)

    util.log("Creating summary table for accessible streams")
    access_summary = arcpy.Statistics_analysis(
        access_sect, config.temp_gdb + r"\access_summary_table",
        "Shape_Length SUM", "WATERSHED;streamAccess_Status")

    # pivot info: one row per watershed, one length column per access status
    util.log("Creating pivot table")
    access_final = arcpy.PivotTable_management(
        access_summary, "WATERSHED", "streamAccess_Status", "SUM_Shape_Length",
        config.temp_gdb + r"\access_final")

    util.log("Adding Shape Length from city streams")
    arcpy.JoinField_management(access_final, "WATERSHED", streams_summary,
                               "WATERSHED", "WSHED_TOTAL_LEN")

    # calculate % values (both rate fields in a single cursor pass)
    util.log("Calc % fully accessible")
    rate_field1 = "Pcnt_Full_Access"
    arcpy.AddField_management(access_final, rate_field1, "Double")
    util.log("Calc % partially accessible")
    rate_field2 = "Pcnt_Partial_Access"
    arcpy.AddField_management(access_final, rate_field2, "Double")
    cursor_fields = [
        "Curr_Full", "Curr_Partial", "WSHED_TOTAL_LEN", rate_field1,
        rate_field2
    ]
    with arcpy.da.UpdateCursor(access_final, cursor_fields) as rows:
        for row in rows:
            row[3] = _pct_of_total(row[0], row[2])
            row[4] = _pct_of_total(row[1], row[2])
            rows.updateRow(row)

    # generate WHI scores
    util.log("Calc WHI full access score")
    score_field1 = "fully_accessible_score"
    arcpy.AddField_management(access_final, score_field1, "DOUBLE")
    with arcpy.da.UpdateCursor(access_final,
                               [rate_field1, score_field1]) as rows:
        for row in rows:
            row[1] = calc.streamAccess1_count(row[0])
            rows.updateRow(row)

    util.log("Calc WHI full and partial access score")
    score_field2 = "all_accessible_score"
    arcpy.AddField_management(access_final, score_field2, "DOUBLE")
    with arcpy.da.UpdateCursor(
            access_final, [rate_field1, rate_field2, score_field2]) as rows:
        for row in rows:
            row[2] = calc.streamAccess2_count(row[0], row[1])
            rows.updateRow(row)

    # convert output to table if needed
    util.tableTo_primaryOutput(access_final)

    util.log("Cleaning up")
    arcpy.Delete_management("in_memory")

    util.log(
        "Module complete ---------------------------------------------------------------"
    )
コード例 #16
0
# Output path for the Canary Islands municipalities result table.
SalidaFinalCanarias = base + r"/" + FGDBcanarias + r"\MunicipiosMeteo"

# check whether the working FGDB already exists; recreate it from scratch
if arcpy.Exists(FGDB):
    arcpy.Delete_management(FGDB)
arcpy.CreateFileGDB_management(base, FGDB)
print("creada FGDB")

print("Empezamos el procesado del resto de variables")
####################################################
# remaining variables: zonal statistics per time slice, then pivot by StdTime
arcpy.md.MakeMultidimensionalRasterLayer(netCDFIberia, multicrf, lista, "ALL")
arcpy.sa.ZonalStatisticsAsTable(sscc, clave, multicrf, zonal_tabla, "DATA",
                                "MEAN", "ALL_SLICES")

arcpy.PivotTable_management(zonal_tabla, clave + ";" + variable2, 'StdTime',
                            'MEAN', salidapivotar)

arcpy.CopyFeatures_management(sscc, muniCopy)
num = 0
# One output table per variable, filtered from the pivoted zonal statistics.
for var in lista:
    arcpy.conversion.TableToTable(salidapivotar, base + r"/" + FGDB,
                                  nombrepivotar + var,
                                  "Variable = '" + var + "'")
    #arcpy.management.MakeTableView(salidapivotar, "zonal_pivotar_View"+var, "Variable = '"+var+"'", None)
    # Collect the time-slice fields (names containing "Std") produced by the pivot.
    fieldList = [
        f.name for f in arcpy.ListFields(
            base + r"/" + FGDB + r"/" + nombrepivotar + var, "*Std*")
    ]
    print(listaalias[num])
    fieldlist2 = []
    # NOTE(review): the body of this inner loop continues beyond this excerpt.
    for field in fieldList:
コード例 #17
0
## Create DM Status Reports
################################################################################

# if record is marked as archived and not listed as dm ready, tag as archived
with arcpy.da.UpdateCursor(elementRecords, ["dm_stat", "archive"]) as cursor:
    for row in cursor:
        if row[1] == 'Y' and row[0] != "dmready":
            row[0] = "Archived"
            cursor.updateRow(row)

## Create pivot table
# Count records per (refcode, dm_stat), then pivot so each DM status becomes
# a FREQUENCY column with one row per refcode.
stats = arcpy.Statistics_analysis(elementRecords,
                                  os.path.join(env.workspace, "stats"),
                                  "refcode COUNT;dm_stat COUNT",
                                  "refcode;dm_stat")
pivot = arcpy.PivotTable_management(stats, 'refcode', 'dm_stat', 'FREQUENCY',
                                    os.path.join(env.workspace, "pivot"))

# add field and populate with 0 if all columns are null
arcpy.AddField_management(pivot, "total_records", "DOUBLE", field_length=3)
with arcpy.da.UpdateCursor(pivot, [
        "total_records", "Archived", "dmpend", "dmproc", "dmready", "dr",
        "idprob", "idrev"
]) as cursor:
    for row in cursor:
        # Pivot cells for statuses a refcode never had come back NULL.
        # NOTE(review): prefer `x is None` over `x == None`.
        newrow = [0 if x == None else x for x in row]
        cursor.updateRow(newrow)

# populate total records field with sum of records
# NOTE(review): this statement is truncated in this excerpt — the field list,
# closing bracket, and cursor body continue beyond this view.
with arcpy.da.UpdateCursor(pivot, [
        "total_records", "Archived", "dmpend", "dmproc", "dmready", "dr",
        "idprob", "idrev"
コード例 #18
0
# Create pivot table so designation percent-intersect values become columns
in_table = 'Draft_Regional_PDA_Eligibility_Summary_2019'
fields = ['Join_ID']
pivot_field = 'Designation'
value_field = 'Percent_Intersect'
out_table = r'\\Mac\Home\Documents\Section\Planning\Growth_Framework_Analysis\Growth_Framework_Analysis_Areas.gdb\Draft_Regional_PDA_Eligibility_Pivot_2019'

# Keyword arguments for PivotTable_management: one output row per Join_ID,
# one column per Designation value, filled with Percent_Intersect.
pivot_field_args = {
    'in_table': in_table,
    'fields': fields,
    'pivot_field': pivot_field,
    'value_field': value_field,
    'out_table': out_table
}

arcpy.PivotTable_management(**pivot_field_args)

# Add field for eligibility flag.
# Fix: the original dict literal listed 'field_length' twice (None, then 50);
# Python silently keeps only the last duplicate key, so the None entry was
# dead code.  Keep the single intended value of 50.
add_field_args = {
    'in_table': out_table,
    'field_name': 'Designation',
    'field_type': 'TEXT',
    'field_precision': None,
    'field_length': 50,
    'field_alias': 'Designation',
    'field_is_nullable': 'NULLABLE',
    'field_is_required': 'NON_REQUIRED',
    'field_domain': None
}
コード例 #19
0
    # NOTE(review): fragment of a loop whose header is above this excerpt —
    # `template`, `transpose`, `outGDB`, and `n` come from the enclosing scope.
    # join area field to template table
    joinTable = arcpy.JoinField_management(template, "Tier_Resilience",
                                           transpose, "Tier_Resilience",
                                           "Area")

    # drop rows whose Area is NULL or "0", then append a % sign to the rest
    with arcpy.da.UpdateCursor(joinTable, "Area") as cursor:
        for row in cursor:
            if row[0] is None or row[0] == "0":
                cursor.deleteRow()
    with arcpy.da.UpdateCursor(joinTable, "Area") as cursor:
        for row in cursor:
            row[0] = str(row[0]) + '%'
            cursor.updateRow(row)

    # pivot table for final format: one column per Tier, Area values in cells
    arcpy.PivotTable_management(
        joinTable, "Resilience_Score", "Tier", "Area",
        os.path.join(outGDB, n + "_" + "climate_resilience"))

# Clean up: remove the intermediate feature classes created by this script.
deleteFC = [
    os.path.join(env.workspace, name)
    for name in ("properties_dissolve", "tab_area", "transpose", "template")
]

for temp_fc in deleteFC:
    arcpy.Delete_management(temp_fc)