Example #1
def createSumTbl(table):
    # Calculate the Attribute Fields for the Cloned DCN Data
    arcpy.Statistics_analysis(table, table + "_area_v1", [[
        "Shape_Area",
        "SUM",
    ]], "SUMAREA")
    arcpy.Statistics_analysis(table, table + "_provtot_v1", [[
        "SUMPROV",
        "FIRST",
    ]], "SUMPROV")
    arcpy.AddField_management(table + "_area_v1", "VERSION", "TEXT", "", "",
                              "50", "", "NULLABLE", "NON_REQUIRED", "")
    arcpy.AddField_management(table + "_area_v1", "AREADBL", "DOUBLE", "", "",
                              "", "", "NULLABLE", "NON_REQUIRED", "")
    arcpy.CalculateField_management(table + "_area_v1", "AREADBL",
                                    "round(!SUM_Shape_Area!/1000000, 0)",
                                    "PYTHON", "")
    arcpy.CalculateField_management(table + "_area_v1", "SUMAREA",
                                    calcSumFldArea, "PYTHON", "")
    arcpy.AddField_management(table + "_provtot_v1", "VERSION", "TEXT", "", "",
                              "50", "", "NULLABLE", "NON_REQUIRED", "")
    # tblName, outFolder and tblFinalSum are assumed module-level globals
    if tblName == "tbl_temp_web_prov_tot_cur":
        arcpy.TableToTable_conversion(table + "_area_v1", outFolder,
                                      tblFinalSum)
        arcpy.CalculateField_management(table + "_area_v1", "VERSION",
                                        "'CURRENT'", "PYTHON")
        arcpy.CalculateField_management(table + "_provtot_v1", "VERSION",
                                        "'CURRENT'", "PYTHON")

    elif tblName == "tbl_temp_web_prov_tot_prev":
        arcpy.CalculateField_management(table + "_area_v1", "VERSION",
                                        "'PREVIOUS'", "PYTHON")
        arcpy.CalculateField_management(table + "_provtot_v1", "VERSION",
                                        "'PREVIOUS'", "PYTHON")
def CalcMeans(inTable, workspace, theField, numHouses):
    arcpy.AddMessage("\tCalculating means for " + theField + " in table: " +
                     inTable + "...")
    if workspace == '':
        workspace = "in_memory"

    # the original elif skipped setting mTable when workspace defaulted above
    if workspace == "in_memory":
        mTable = workspace + os.sep + "mean_value"
    else:
        mTable = workspace + os.sep + "mean_value.dbf"

    # Get value from summary table

    if numHouses != "":
        arcpy.Statistics_analysis(inTable, mTable, theField + " MEAN",
                                  "HOUSES")
        theField = "MEAN_" + theField
        theField = arcpy.ValidateFieldName(theField, workspace)
        rows = arcpy.SearchCursor(mTable, "HOUSES = " + str(numHouses))
    else:
        arcpy.Statistics_analysis(inTable, mTable, theField + " MEAN")
        # Get value from summary table
        theField = "MEAN_" + theField
        theField = arcpy.ValidateFieldName(theField, workspace)
        rows = arcpy.SearchCursor(mTable)

    arcpy.AddMessage("\tGetting value for " + theField)
    row = rows.next()
    theMean = row.getValue(theField)

    # Delete the row and cursor
    del row, rows
    return theMean
Example #3
def HighCrashRate_BlockGroup(Para_Workspace, Para_Blockgroups,
                             Para_CountyBoundary, Para_MajorRoad, Para_Crash,
                             Para_Distance):
    global intermediate
    local_intermediate = []
    ## Calculate the road length for each block
    arcpy.MakeFeatureLayer_management(Para_Blockgroups, "blockgroups")
    arcpy.SelectLayerByLocation_management("blockgroups",
                                           "HAVE_THEIR_CENTER_IN",
                                           Para_CountyBoundary, "",
                                           "NEW_SELECTION")
    arcpy.CopyFeatures_management("blockgroups", "M_blg")
    arcpy.Intersect_analysis(["M_blg", Para_MajorRoad], "M_mjr_seg")
    statsFields = [["Shape_Length", "SUM"]]
    arcpy.AddMessage("complete")
    arcpy.Statistics_analysis("M_mjr_seg", "M_mjr_seg_stat", statsFields,
                              "FID_M_blg")
    arcpy.AddField_management("M_mjr_seg_stat", "Miles", "FLOAT")
    arcpy.CalculateField_management("M_mjr_seg_stat", "Miles",
                                    '!SUM_Shape_Length! * 0.000621371192')
    JoinField = ["SUM_Shape_Length", "Miles"]
    arcpy.JoinField_management("M_blg", "OBJECTID", "M_mjr_seg_stat",
                               "FID_M_blg", JoinField)

    ## Calculate the crash rate for each block
    arcpy.Buffer_analysis(Para_MajorRoad, "major_roads_Buffer", Para_Distance)
    arcpy.Clip_analysis(Para_Crash, "major_roads_Buffer", "crashes_300feet")
    arcpy.SpatialJoin_analysis("M_blg", "crashes_300feet", "M_blg_crashcount",
                               "JOIN_ONE_TO_ONE")
    arcpy.AddField_management("M_blg_crashcount", "crash_rate", "FLOAT")
    arcpy.CalculateField_management("M_blg_crashcount", "crash_rate",
                                    '!Join_Count!/!Miles!')
    arcpy.AddMessage("crash rate compute complete")
    crash_rate_statField = [["crash_rate", "MEAN"], ["crash_rate", "MIN"],
                            ["crash_rate", "MAX"]]
    arcpy.Statistics_analysis("M_blg_crashcount", "M_blg_crashcount_stat",
                              crash_rate_statField)

    new_item = []
    for item in ["MEAN", "MIN", "MAX"]:
        new_item.append(item + "_crash_rate")

    Stat_list = list(arcpy.da.SearchCursor("M_blg_crashcount_stat", new_item))

    # Stat_list[0] holds (MEAN, MIN, MAX) in the order requested in new_item
    LowRate = Stat_list[0][1]
    MidRate = (Stat_list[0][0] + Stat_list[0][1]) / 2
    HighRate = (Stat_list[0][1] + Stat_list[0][2]) / 2
    SQL_highrate = '"crash_rate" > {value}'.format(value=HighRate)
    arcpy.FeatureClassToFeatureClass_conversion("M_blg_crashcount",
                                                Para_Workspace,
                                                "HighCrashRate", SQL_highrate)
    arcpy.AddMessage("Complete the Part 1: HighCrashRate_BlockGroup")
    local_intermediate = [
        "M_blg", "M_mjr_seg", "M_mjr_seg_stat", "major_roads_Buffer",
        "crashes_300feet", "M_blg_crashcount", "M_blg_crashcount_stat",
        "HighCrashRate"
    ]
    intermediate = intermediate + local_intermediate
Example #4
def genEstad_Finales(capaInteres, campoEstad, gdbRecepcion):
    arcpy.Statistics_analysis(capaInteres,
                              gdbRecepcion + "\\Estad_Basica_Agrupacion1",
                              campoEstad + " SUM", 'Groups')
    arcpy.Statistics_analysis(capaInteres,
                              gdbRecepcion + "\\Estad_Basica_Agrupacion2",
                              campoEstad + " SUM", 'Reclas_Groups')
    arcpy.Statistics_analysis(capaInteres,
                              gdbRecepcion + "\\Estad_Basica_Agrupacion3",
                              campoEstad + " SUM", 'Reclas_Groups_2')
Example #5
def Precip(watershed_poly):
    # Local variables
    precipchago = r'D:\Jackson\StorageCapacity\ToolData\precipchago'
    if precipchago == '#' or not precipchago:
        precipchago = "precipchago"  # provide a default value if unspecified

    # open the raster only after the default check (the original checked too late)
    precip_raster = Raster(precipchago)  # assumes `from arcpy.sa import Raster`
    prec_table = 'prec_table'
    prec_table_avg = 'prec_table_avg'

    # Process
    arcpy.AddMessage("Extract precipitation values from raster to table")
    arcpy.ExtractValuesToTable_ga(watershed_poly, precip_raster, prec_table,
                                  "", "true")
    arcpy.AddField_management(watershed_poly, "Prec_ID", "SHORT", "", "", "",
                              "", "NULLABLE", "NON_REQUIRED")
    arcpy.CalculateField_management(watershed_poly, "Prec_ID", 1, "PYTHON", "")
    arcpy.AddMessage("Determine Average precipitation across watershed")
    arcpy.Statistics_analysis(prec_table, prec_table_avg, "Value MEAN", "")
    arcpy.AddField_management(prec_table_avg, "Av_Prec", "DOUBLE", "", "", "",
                              "", "NULLABLE", "NON_REQUIRED")
    arcpy.AddField_management(prec_table_avg, "ID")
    arcpy.CalculateField_management(prec_table_avg, "ID", 1, "PYTHON")
    arcpy.CalculateField_management(prec_table_avg, "Av_Prec", "!MEAN_Value!",
                                    "PYTHON", "")
    arcpy.JoinField_management(watershed_poly, "Prec_ID", prec_table_avg, "ID",
                               "Av_Prec")
Example #6
def getLastSynchronizationTime(workspace, tableList):
    '''Looks at the existing records in the SDE and returns the latest synchronization time'''
    arcpy.AddMessage('\t-Checking Last Synchronization')
    arcpy.env.workspace = workspace
    statTables = []
    #Dummy value to compare time
    lastSync = datetime.datetime.fromtimestamp(0)
    for table in tableList:
        #Skip if empty table (i.e., no rows)
        arcpy.AddMessage('\t\t-Checking sync on {0}'.format(table))
        #Just use the last part of the table name
        tableName = table.split(".")[-1]
        rowCheck = arcpy.GetCount_management(tableName)
        rowCount = int(rowCheck.getOutput(0))
        if rowCount > 0:
            statTable = arcpy.Statistics_analysis(tableName, r'in_memory\stat_{0}'.format(tableName), "SYS_TRANSFER_DATE MAX")
            statTables.append(statTable)
    for s in statTables:
        with arcpy.da.SearchCursor(s, ['MAX_sys_transfer_date']) as rows:
            for row in rows:
                thisDate = row[0]
                if thisDate > lastSync:
                    lastSync = thisDate
    for s in statTables:
        arcpy.Delete_management(s)
    #If we get no results (i.e., no tables) return None
    if lastSync == datetime.datetime.fromtimestamp(0):
        return None
    else:
        arcpy.AddMessage('\t\t-Last Synchronized on {0}'.format(createTimestampText(lastSync)))
        return lastSync
Example #7
def joinParcelsToBuildings(pathBuildingFootprints, parcels, pathGDB):
	tempFeaturesList = []
	# Process: Copy Footprint File
	print('Copying input building footprints.')
	buildingFootprints = os.path.join(pathGDB, os.path.splitext(pathBuildingFootprints)[0] + '_Copy')
	print('Building footprint copy path: ', buildingFootprints)
	arcpy.CopyFeatures_management(pathBuildingFootprints, buildingFootprints)
	tempFeaturesList.append(buildingFootprints)

	# Process: Standard Spatial Join
#	print('Spatial joining buildings to parcels.')
#	buildingFootprintsAPN = os.path.join(pathGDB, os.path.splitext(pathBuildingFootprints)[0] + '_ParcelJoin')
#	arcpy.SpatialJoin_analysis(buildingFootprints, parcels, buildingFootprintsAPN, "JOIN_ONE_TO_ONE", "KEEP_COMMON", "", "WITHIN", "", "")

	# Process: Spatial Join (SpatialJoinLargestOverlap is from the Largest Overlap sample toolbox, not core arcpy)
	print('Spatial joining buildings to parcels.')
	buildingFootprintsAPN = os.path.join(pathGDB, os.path.splitext(pathBuildingFootprints)[0] + '_ParcelJoin')
	arcpy.SpatialJoinLargestOverlap(buildingFootprints, parcels, buildingFootprintsAPN, "false", "LARGEST_OVERLAP")

	# Process: Summary Statistics
	print('Performing summary statistics of buildings in parcels.')
	buildingSummaryStatistics = os.path.join(pathGDB, os.path.splitext(buildingFootprintsAPN)[0] + '_summstats')
	arcpy.Statistics_analysis(buildingFootprintsAPN, buildingSummaryStatistics, "Area_ft SUM", "APN")
	tempFeaturesList.append(buildingSummaryStatistics)

	# Process: Join Field for Summary Statistics
	print('Joining summary statistics to building footprints.')
	arcpy.JoinField_management(buildingFootprintsAPN, "APN", buildingSummaryStatistics, "APN", "FREQUENCY;SUM_Area_ft")

	# Delete temporary files
	for tempFeature in tempFeaturesList:
		arcpy.Delete_management(tempFeature)

	return buildingFootprintsAPN
Example #8
def createSummaryTable(gdb, base_layer, table_name, summary_field, layer,
                       message):
    """Create a summary table for a provided layer and field"""
    out_table = os.path.join(gdb, '{}_{}'.format(base_layer, table_name))
    stats_fields = [[summary_field, 'SUM']]
    arcpy.Statistics_analysis(layer, out_table, stats_fields)
    return out_table
Example #9
def CalcCorridorDistance(pointLayer, workspace):
    arcpy.AddMessage("\tCalculating distances between structures...")

    if workspace == '':
        workspace = arcpy.env.workspace

    # the original elif left sumTable/iTable unset when the workspace
    # defaulted from the environment
    if workspace == "in_memory":
        sumTable = workspace + os.sep + "Core_summary"
        iTable = workspace + os.sep + "Iteration_summary"
    else:
        sumTable = workspace + os.sep + "Core_summary.dbf"
        iTable = workspace + os.sep + "Iteration_summary.dbf"
    cLayer = workspace + os.sep + "coresLayer"

    whereClause = '"NEAR_DIST" <> -1'

    # Calculate distance between points
    arcpy.Near_analysis(pointLayer, pointLayer)

    # Process: Make Feature Layer ...
    arcpy.MakeFeatureLayer_management(pointLayer, cLayer, "", "",
                                      "Input_FID Input_FID VISIBLE NONE")

    # Process: Select Near Distance <> -1 (distance between a point and itself)...
    arcpy.SelectLayerByAttribute_management(cLayer, "NEW_SELECTION",
                                            whereClause)

    # Process: Summary Statistics on selected set...
    arcpy.Statistics_analysis(cLayer, iTable, "NEAR_DIST MEAN")

    arcpy.Delete_management(cLayer, "")

    return iTable  #send iteration table back for further processing
Example #10
def summaryOfField(inputFC, outputTable, whereClause, transfo):

    layerName = inputFC + '_Summary_' + transfo
    arcpy.MakeFeatureLayer_management(inputFC, layerName)
    arcpy.SelectLayerByAttribute_management(layerName, "NEW_SELECTION",
                                            whereClause)

    arcpy.AddMessage("Calculating the summary of the input field")
    arcpy.Statistics_analysis(layerName, outputTable, [["Join_Count", "SUM"]])

    # Read SUM_Join_Count from the output table
    result = arcpy.GetCount_management(outputTable)
    count = int(result.getOutput(0))
    total = 0
    field = ["SUM_Join_Count"]
    if count > 0:
        cursor = arcpy.da.SearchCursor(outputTable, field)
        for row in cursor:
            if row[0] is not None:
                total = row[0]
            else:
                total = 0
    else:
        # The table is empty
        total = 0

    arcpy.AddMessage(
        "Summary of field Join_Count has been done successfully. Sum = " +
        str(total))

    arcpy.Delete_management(layerName)

    return total
Example #11
def get_min_max_stream_order_for_species(species_name, species_range, huc12_layer):
    """
        Given a path to a species range and an input feature layer of huc 12s, gets the minimum MAX_StreamOrde in
        the range
    :param species_name:
    :param species_range:
    :param huc12_layer:
    :return:
    """
    # select HUC12's by species occurrence and run (min) stats
    select_species_range(species_range, species_name, huc12_layer)


    stats_table = "Stats_{}".format(species_name)
    log.info(species_name + ' min(max(stream order)) summary statistics\t{}'.format(datetime.datetime.now().time()))
    arcpy.Statistics_analysis(huc12_layer, stats_table, [["max_stream_order", "MIN"]])

    cursor = arcpy.da.SearchCursor(stats_table, ['MIN_max_stream_order'])
    min_stream = None
    for row in cursor:  # there's only one row, so this method should be OK to get the correct value
        min_stream = row[0]

    log.info("Min Stream Order for {} is {}".format(species_name, min_stream))
    if min_stream < 1:
        raise ValueError("Min Stream for {} is {}, which is not a valid stream order - This likely means the stream data "
                         "hasn't been filtered properly (to only real streams), or the join is misconfigured and streams "
                         "outside of the species range are being included.".format(species_name, min_stream))
    return min_stream
Example #12
def summarizeGumByCategory():

    layer = "in_memory\gumcount_0plus"
    #table = "GumCountByCategory"
    table = "in_memory/CityGumCountByCategory"
    #table = publishGDB + "/CityGumCountByCategory"
    #arcpy.Delete_management(layer)

    arcpy.MakeFeatureLayer_management("Sites", "lyr")
    arcpy.SelectLayerByAttribute_management("lyr", "NEW_SELECTION",
                                            "GUM_COUNT_ACTUAL > 0")
    arcpy.CopyFeatures_management("lyr", layer)

    #arcpy.Statistics_analysis(layer, "GumCountByCategory", "GUM_COUNT_ACTUAL SUM", "SIC_NAME")
    arcpy.Statistics_analysis(layer, table, "GUM_COUNT_ACTUAL SUM", "SIC_NAME")
    arcpy.AddField_management(table, "AVG_GUM_COUNT", "SHORT")

    fields = ['SIC_NAME', 'FREQUENCY', 'SUM_GUM_COUNT_ACTUAL', 'AVG_GUM_COUNT']

    with arcpy.da.UpdateCursor(table, fields) as cursor:
        for row in cursor:
            print(row)
            if row[0] is None:
                row[0] = "ADDRESS POINT"
            row[3] = round(row[2] / row[1])
            cursor.updateRow(row)

    sort_fields = [["SUM_GUM_COUNT_ACTUAL", "DESCENDING"]]

    sortTable = publishGDB + "/CityGumCountByCategory"
    arcpy.Sort_management(table, sortTable, sort_fields)
Example #13
def downsample(city_id):
    log('Downsampling points for %s', city_id)

    output_dir = join(DOWNSAMPLE_DIR, str(city_id))
    if not exists(output_dir):
        os.makedirs(output_dir)
        log('Created %s', output_dir)
    else:
        log('%s already exists!', output_dir)

    samples_shp = join(LATLNGS_SHP_DIR, '%s.shp' % city_id)

    downsampling_fishnet_poly_shp = join(output_dir,
                                         'downsampling_fishnet.shp')
    downsampling_fishnet_label_shp = join(output_dir,
                                          'downsampling_fishnet_label.shp')

    if not exists(downsampling_fishnet_poly_shp):
        log('Creating fishnet...')
        desc = arcpy.Describe(samples_shp)
        arcpy.CreateFishnet_management(
            downsampling_fishnet_poly_shp, str(desc.extent.lowerLeft),
            str(desc.extent.XMin) + ' ' + str(desc.extent.YMax + 10), '0.0012',
            '0.0012', '0', '0', str(desc.extent.upperRight), 'LABELS', '#',
            'POLYGON')
        log('Fishnet creation complete')

    samples_identity_shp = join(output_dir, 'samples_identity.shp')
    if not exists(samples_identity_shp):
        log('Computing identity...')
        arcpy.Identity_analysis(samples_shp, downsampling_fishnet_poly_shp,
                                samples_identity_shp)
        log('Identity complete')

    samples_stats = join(output_dir, 'samples_stats')
    if not exists(join(output_dir, 'info')):
        log('Starting summary statistics...')
        arcpy.Statistics_analysis(samples_identity_shp, samples_stats,
                                  [['price', 'MEAN']], 'FID_downsa')
        log('Summary statistics complete')

    log('Detecting if join has already been done...')
    join_done = False
    fields = arcpy.ListFields(downsampling_fishnet_label_shp)
    for field in fields:
        if field.name == 'MEAN_PRICE': join_done = True

    if not join_done:
        log('Performing table join on FID:FID_DOWNSA...')
        arcpy.JoinField_management(downsampling_fishnet_label_shp, 'FID',
                                   samples_stats, 'FID_DOWNSA', ['MEAN_PRICE'])
        log('Table join on FID:FID_DOWNSA done.')

    log('Defining projection...')
    arcpy.DefineProjection_management(downsampling_fishnet_label_shp,
                                      PROJECTION_FILE)

    log('FINISHED downsampling %s', city_id)
    log('======================END==========================')
    return downsampling_fishnet_label_shp
Example #14
def getCrudeMax(wrkspc, currentDataSet, document):
    # Return the names of the states with the highest and second highest crude rate
    mxd = arcpy.mapping.MapDocument(document)  # access map document
    df = arcpy.mapping.ListDataFrames(mxd)[0]  # get first data frame
    layer = arcpy.mapping.ListLayers(mxd, "", df)[1]  # get states layer
    out = wrkspc + "\\sumstats"  # define an output path
    rateField = "US_states.CRUDE_RATE"  # specify which column to get statistics on
    path = arcpy.Statistics_analysis(
        layer, out, [[rateField, "MAX"]])  # use statistics function to get max
    fields = arcpy.ListFields(path)  # get list of fields in new table
    maximum = [
        row[0] for row in arcpy.da.SearchCursor(path, [fields[3].name])
    ][0]  # get actual number out of table
    second = 0  # initialize second highest value
    nameField = currentDataSet + ".csv.State"  # define column to look at
    state = "unknown"  # initialize highest state
    state2 = "unknown"  # initialize second highest state
    cursor = arcpy.da.SearchCursor(
        layer, [nameField, rateField],
        rateField + " > 15")  # create cursor to go over states and values
    for row in cursor:
        if row[1] == maximum:  # if the value matches the maximum
            state = row[0]  # define the highest state
        else:  # otherwise look for the second highest value and state
            if row[1] > second:  # if the value is higher than the current recorded value
                second = row[1]  # define the value
                state2 = row[0]  # define the state
    del mxd
    return [state, state2]  # return both states
Example #15
def summary(inTbl, outTbl, fields_stat):
    """
    Statistics analysis tool of ArcGIS toolbox
    
    Available statistics types are:
    * SUM   - Adds the total value for the specified field. 
    * MEAN  - Calculates the average for the specified field. 
    * MIN   - Finds the smallest value for all records of the specified field. 
    * MAX   - Finds the largest value for all records of the specified field. 
    * RANGE - Finds the range of values (MAX minus MIN) for the specified field. 
    * STD   - Finds the standard deviation on values in the specified field.
    * COUNT - Finds the number of values included in statistical calculations.
    This counts each value except null values. To determine the number of null
    values in a field, use the COUNT statistic on the field in question,
    and a COUNT statistic on a different field which does not contain nulls
    (for example, the OID if present), then subtract the two values.
    * FIRST - Finds the first record in the Input Table and uses its
    specified field value. 
    * LAST  - Finds the last record in the Input Table and uses its
    specified field value.
    """

    arcpy.Statistics_analysis(in_table=inTbl,
                              out_table=outTbl,
                              statistics_fields=fields_stat)

    return outTbl
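A minimal usage sketch (the geodatabase paths and the AREA field are hypothetical, not from the original project). Since fields_stat is passed straight through to statistics_fields, both the list-of-pairs form shown here and the "AREA SUM;AREA MEAN" string form are accepted by the tool:

# hypothetical inputs: a parcels table with a numeric AREA field
stats = summary(r"C:\data\parcels.gdb\parcels",
                r"C:\data\parcels.gdb\parcels_stats",
                [["AREA", "SUM"], ["AREA", "MEAN"]])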
Example #16
def fc_stats(workspace, fc, stat_fields, case_field):
    """Returns a table of statistics for the specifed feature class, fields, 
    and cases.
    
    Usage:
    Uses the arcpy "Summary Statistics" tool. See this tool's help for a list 
    of statistics. 
    
    Args:
        workspace (str):    the path to the workspace
        fc (str):           a feature class object
        stat_fields (list): a list of field, stat pairs i.e., [["Z", "MIN"], 
                            ["Z", "MAX"]]
        case_field (str):   the case field that will be used to calculate 
                            summary statistics for
    
    Returns:
        arcpy table object: 
    """
    arcpy.Statistics_analysis(in_table=fc,
                              out_table=os.path.join(workspace,
                                                     "fc_stat_table"),
                              statistics_fields=stat_fields,
                              case_field=case_field)
    fc_stat_table = arcpy.MakeTableView_management(
        in_table=os.path.join(workspace, "fc_stat_table"),
        out_view="fc_stat_table")
    return fc_stat_table
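A hypothetical call matching the docstring's example fields (the workspace, feature class, and LANDUSE case field are illustrative, not from the original project):

# min/max elevation per land-use class, returned as a table view
view = fc_stats(r"C:\data\work.gdb", r"C:\data\work.gdb\terrain_points",
                [["Z", "MIN"], ["Z", "MAX"]], "LANDUSE")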
Example #17
def CreatePivotTable(inTable, outTable):
    '''function that calculates statistics and creates pivot table'''

    arcpy.Statistics_analysis(inTable, outTable, "refcode COUNT;dm_stat COUNT",
    "refcode;dm_stat")

    arcpy.PivotTable_management(outTable, 'refcode', 'dm_stat', 'FREQUENCY',
    os.path.join(env.workspace, "pivotTable"))
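The pivot table is written into env.workspace, so the function assumes `from arcpy import env` and a workspace set beforehand. A hypothetical call (paths are illustrative):

env.workspace = r"C:\data\dm.gdb"
CreatePivotTable(r"C:\data\dm.gdb\records", r"C:\data\dm.gdb\dm_counts")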
Example #18
def PlacePointsWithRaster(input_geodatabase, inputRaster, numberOfPoints):
    #Environmental settings
    import arcpy
    import random
    arcpy.env.workspace = input_geodatabase
    arcpy.env.overwriteOutput = True

    #Check the existence of data
    if not inputRaster or not arcpy.Exists(inputRaster):
        return "Raster does not exist!"

    #Create points for every raster grid with the raster value
    allPoints = arcpy.conversion.RasterToPoint(inputRaster, 'pointset')

    #Get the sum of all grid values
    stats = arcpy.Statistics_analysis(allPoints, 'stats',
                                      [['grid_code', 'SUM']])
    with arcpy.da.SearchCursor(stats, ["SUM_grid_code"]) as cursor:
        for row in cursor:
            SUM = row[0]
    del cursor, row

    #Generate random numbers from the range between 0 and the sum.
    #The length of the list is equal to the number of points should be assigned at the end.
    randomList = []
    for i in range(1, numberOfPoints + 1):
        n = random.uniform(0, SUM)
        randomList.append(n)
    #Sort the list so that the list can be gone over just once
    randomList.sort()

    #This block attempts to find which point these random numbers will fall on.
    #Each raster grid value is added up one by one and compared with the random numbers.
    #If one random number falls into one of the intervals, that point associated with this interval will be exported to the final output.
    output = None  # stays None when numberOfPoints <= 0
    if numberOfPoints > 0:
        output = arcpy.management.CreateFeatureclass(input_geodatabase,
                                                     'output', 'POINT')
        with arcpy.da.SearchCursor(allPoints,
                                   ["OBJECTID", "grid_code"]) as cursor:
            compare = 0
            pointNumber = 0
            for row in cursor:
                compare = compare + row[1]
                if compare > randomList[pointNumber]:
                    ID = row[0]
                    select = arcpy.management.SelectLayerByAttribute(
                        allPoints, 'NEW_SELECTION', f'"OBJECTID"= {ID}')
                    arcpy.management.Append(select, output, 'NO_TEST')
                    pointNumber = pointNumber + 1
                    print(pointNumber)
                    if pointNumber == numberOfPoints:
                        break
    #Delete byproducts (the cursors are already closed by their with blocks)
    arcpy.management.Delete('pointset')
    arcpy.management.Delete('stats')
    return output
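The cumulative-sum comparison above is standard weighted sampling. Here is a standalone sketch of the same idea with plain Python lists (illustrative weights, no arcpy); the while loop also handles several picks landing in one interval, which the function above steps through one row at a time:

import random

weights = [5, 1, 4]  # stand-ins for the per-point raster grid values
total = sum(weights)
picks = sorted(random.uniform(0, total) for _ in range(2))

chosen, running, i = [], 0, 0
for point_id, w in enumerate(weights):
    running += w
    # every sorted random number falling inside this point's interval selects it
    while i < len(picks) and running > picks[i]:
        chosen.append(point_id)
        i += 1
print(chosen)  # e.g. [0, 2]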
Example #19
def genSummaryReport(masterTable, summaryTable):
    APN_List = []
    correctionList = []
    apnDict = dict()

    now = datetime.datetime.now()
    currentYear = now.year
    nextYear = currentYear + 1
    arcpy.Statistics_analysis(masterTable, summaryTable, [['Levy', 'SUM']],
                              ['District_Name'])

    expression = "{} LIKE '{}'".format(
        arcpy.AddFieldDelimiters(summaryTable, 'District_Name'), '%')
    selectAllExpression = "{} LIKE '{}'".format(
        arcpy.AddFieldDelimiters(masterTable, 'APN'), '%')

    with arcpy.da.SearchCursor(summaryTable,
                               ['District_Name', 'FREQUENCY', 'SUM_Levy'],
                               where_clause=expression) as cursor:
        for row in cursor:
            valueList = []
            for value in row:
                if type(value) is float:
                    roundNumber = '${0:,.2f}'.format(round(value, 2))
                    valueList.append(roundNumber)
                else:
                    valueList.append(str(value))
            APN_List.append(valueList)

    with arcpy.da.SearchCursor(
            masterTable, ['APN', 'District_ID', 'Levy'],
            where_clause=selectAllExpression) as selectAllCursor:
        for selectRow in selectAllCursor:
            correctionString = []

            apn = str(selectRow[0])
            apn = apn.replace('-', '')
            apn = "{:0>10}".format(int(apn))

            disID = "{:0>6}".format(selectRow[1])

            if selectRow[2] is None:
                levy = "{:0>9}".format(0)
            else:
                levy = '{:.2f}'.format(float(selectRow[2]))
                levy = levy.replace('.', '')
                levy = "{:0>9}".format(int(levy))

            corrections = "      {} {}  {}<br>".format(apn, disID, levy)

            if disID not in apnDict:
                apnDict[disID] = []
                apnDict[disID].append(corrections)
            else:
                apnDict[disID].append(corrections)
    arcpy.AddMessage('Done with Function')
    return APN_List, apnDict
Example #20
def convertAltStreets(Project_Folder):
    arcpy.env.overwriteOutput = True

    Model_Inputs_gdb = os.path.join(Project_Folder, 'Model_Inputs.gdb')
    Model_Outputs_gdb = os.path.join(Project_Folder, 'Model_Outputs.gdb')

    streets_simple = os.path.join(Model_Outputs_gdb, 'Streets_Simple')
    altstreets = os.path.join(Model_Inputs_gdb, 'AltStreets')

    arcpy.env.workspace = Model_Inputs_gdb

    # Simplify AltStreets and Streets Lines
    # removes some of the nodes that make up the lines to make the files low resolution enough to be uploaded through mapmaker
    altstreets_simple = arcpy.SimplifyLine_cartography(in_features=altstreets, out_feature_class=os.path.join(Model_Outputs_gdb, "AltStreet_simple"), algorithm="POINT_REMOVE",
                                                       tolerance="5 Feet", error_resolving_option="RESOLVE_ERRORS", collapsed_point_option="KEEP_COLLAPSED_POINTS", error_checking_option="CHECK", in_barriers=[])[0]

    # add ref_zlev and dom fields for alias classification and linking to streets file
    arcpy.AddFields_management(in_table=altstreets_simple, field_description=[
                               ["REF_ZLEV", "SHORT"], ["DOM", "LONG"]])
    print('added fields to altstreets')

    arcpy.AddIndex_management(altstreets_simple, fields=[
                              "LINK_ID"], index_name="LINK_ID", unique="NON_UNIQUE", ascending="ASCENDING")
    print('added altstreet index')

    arcpy.JoinField_management(in_data=altstreets_simple, in_field="LINK_ID",
                               join_table=streets_simple, join_field="LINK_ID", fields=["NUM_STNMES"])
    print('joined altstreets to streets')

    # Filter out all of the altstreet rows that do not have multiple names
    altstreets_filter = arcpy.FeatureClassToFeatureClass_conversion(
        in_features=altstreets_simple, out_path=Model_Outputs_gdb, out_name="AltStreets_Filter", where_clause="NUM_STNMES > 1")
    print('altstreets filtered if less than 2')

    # Create Statistics Table from AltStreets_Simple
    # add in the count of all the street names added to the altstreets simple
    altstreet_stats = os.path.join(Model_Outputs_gdb, "Altstreets_Stats")
    arcpy.Statistics_analysis(in_table=altstreets_filter, out_table=altstreet_stats, statistics_fields=[
                              ["LINK_ID", "FIRST"]], case_field=["LINK_ID", "ST_NAME"])

    # Join AltStreets_Simple with AltStreets_Stats
    # (note: the stats table created above only has FREQUENCY/FIRST_LINK_ID,
    # so this join assumes NUM_STNMES exists there; kept as in the original)
    arcpy.JoinField_management(in_data=altstreets_simple, in_field="LINK_ID",
                               join_table=altstreet_stats, join_field="LINK_ID", fields=["NUM_STNMES"])

    arcpy.CalculateField_management(in_table=altstreets_simple, field="Dom",
                                    expression="1", expression_type="PYTHON3", code_block="", field_type="TEXT")

    # Alias streetname identifier calculation (Alias == -9)
    # MapMaker REQUIRES it to be -9 in order to find it as an alias field
    arcpy.CalculateField_management(in_table=altstreets_simple, field="REF_ZLEV",
                                    expression="-9", expression_type="PYTHON3", code_block="", field_type="TEXT")

    # updated the schema to match mapmaker schema
    updateSchema(altstreets_simple)

    # returns altstreets_final gdb location
    return arcpy.FeatureClassToFeatureClass_conversion(in_features=altstreets_simple, out_path=Model_Outputs_gdb, out_name="AltStreets_Final")[0]
Example #21
def fc_stats():
    # assumes module-level globals: outWorkbook (an xlsxwriter Workbook) and fc
    outSheet = outWorkbook.add_worksheet(fc[0:30])
    outSheet.set_column(0, 4, 15)
    totalRows = arcpy.GetCount_management(fc)
    spatialRef = arcpy.Describe(fc).spatialReference
    fields = arcpy.ListFields(fc)
    stats_fields = []
    out_geom = "memory" + "\\" + str(fc) + "_" + "geom"
    arcpy.management.CheckGeometry(fc, out_geom)
    totalGeom = arcpy.management.GetCount(out_geom)
    output = "memory" + "\\" + str(fc)
    outSheet.write(0, 0, "NAME")
    outSheet.write(0, 1, fc)
    outSheet.write(1, 0, "TYPE")
    outSheet.write(1, 1, "Feature Class")
    outSheet.write(2, 0, "GCS name")
    outSheet.write(2, 1, spatialRef.name)
    outSheet.write(3, 0, "GCS type")
    outSheet.write(3, 1, spatialRef.type)
    outSheet.write(4, 0, "ROWS")
    outSheet.write(4, 1, int(str(totalRows)))
    outSheet.write(5, 0, "FIELDS")
    outSheet.write(5, 1, int(str(len(fields))))
    outSheet.write(6, 0, "GEOM ERROR")
    outSheet.write(6, 1, int(str(totalGeom)))
    outSheet.write(8, 0, "FIELD")
    outSheet.write(8, 1, "ALIAS")
    outSheet.write(8, 2, "TYPE")
    outSheet.write(8, 3, "COUNT NULL")
    outSheet.write(8, 4, "COUNT BLANK")
    arcpy.management.Delete(out_geom)
    for field in fields:
        if field.type not in ("OID", "Geometry"):
            outSheet.write(fields.index(field) + 7, 0, field.name)
            outSheet.write(fields.index(field) + 7, 1, field.aliasName)
            outSheet.write(fields.index(field) + 7, 2, field.type)
            stats_fields.append([field.name, "COUNT"])
        if field.type not in ("OID", "Geometry", "Double", "Integer",
                              "SmallInteger", "Single"):
            out_fc = "memory" + "\\" + str(fc) + "_" + str(field.name)
            expression = str(field.name) + ' IN (\'\', \' \')'
            arcpy.Select_analysis(fc, out_fc, expression)
            totalBlank = arcpy.GetCount_management(out_fc)
            if int(str(totalBlank)) > 0:
                outSheet.write(
                    fields.index(field) + 7, 4, int(str(totalBlank)))
            arcpy.management.Delete(out_fc)
    arcpy.Statistics_analysis(fc, output, stats_fields)
    fieldsOutput = arcpy.ListFields(output)
    for field in fieldsOutput:
        with arcpy.da.SearchCursor(output, [field.name]) as cursor:
            for row in cursor:
                if fieldsOutput.index(field) > 1:
                    outSheet.write(
                        fieldsOutput.index(field) + 7, 3,
                        int(totalRows[0]) - row[0])
    arcpy.management.Delete(output)
Example #22
def countCategoricalPointTypesWithinPolygons(fcPoint, pointFieldName,
                                             fcPolygon, workspace):
    #set workspace to input geodatabase
    arcpy.env.workspace = workspace
    #extract distinct values of the attribute from the point feature class and save to a list
    try:
        #set workspace to input geodatabase
        arcpy.env.workspace = workspace
        #look through the point feature specifically at the facility name
        vals = unique_values(fcPoint, pointFieldName)  # helper assumed defined elsewhere in the module
        #print unique values
        print(vals)
    except Exception as e:
        print("Error: " + e.args[0])
    #normalize the "status" field if present: the original loop referenced
    #undefined fieldInfo/index/layer variables; this is a minimal
    #reconstruction of its stated intent (13-character, stripped values)
    fieldNames = [f.name for f in arcpy.ListFields(fcPoint)]
    if "status" in fieldNames:
        #create new field for the newly managed name
        arcpy.AddField_management(fcPoint, "stat", "TEXT", "", "", "13", "",
                                  "NULLABLE", "NON_REQUIRED", "")
        #use strip to remove white spaces (truncated to 13 characters)
        arcpy.CalculateField_management(fcPoint, "stat",
                                        "!status!.strip()[:13]",
                                        "PYTHON_9.3", "")
        #delete the old field
        arcpy.DeleteField_management(fcPoint, "status")
    #define values for spatial analysis count
    points = fcPoint
    #define polygons for spatial analysis count
    polygons = fcPolygon
    #define the point field name for the analysis count
    pointID = pointFieldName
    #output count field (the original assigned an undefined `layer` here)
    countField = "PointCount"
    #calculate the frequency of the event
    expression = "recalc(!FREQUENCY!)"
    #create a code block to keep track of events
    block = """def recalc(freq):
        if freq > -1:
            return freq
        else:
            return 0"""
    #use spaital join analysis to keep track of points in the polygons
    arcpy.SpatialJoin_analysis(points, polygons, "in_memory/PointsInPolys")
    #case field will then return the count per unique ID field
    arcpy.Statistics_analysis("in_memory/PointsInPolys",
                              "in_memory/SS_PointsInPolys",
                              [[pointID, "Count"]], pointID)
    #join the values to the new field
    arcpy.JoinField_management(polygons, pointID, "in_memory/SS_PointsInPolys",
                               pointID, "FREQUENCY")
    #add and calculate the new count field (the original never created it)
    arcpy.AddField_management(polygons, countField, "LONG")
    arcpy.CalculateField_management(polygons, countField, expression, "PYTHON",
                                    block)
    #delete the old field
    arcpy.DeleteField_management(polygons, "FREQUENCY")
Example #23
def CreatePivotTable(inTable, outTable):
    '''function that creates pivot table'''

    # creates statistics table counting number of records for each DM status for each refcode
    arcpy.Statistics_analysis(inTable, outTable, "refcode COUNT;dm_stat COUNT",
                              "refcode;dm_stat")

    # convert into pivot table
    arcpy.PivotTable_management(outTable, 'refcode', 'dm_stat', 'FREQUENCY',
                                os.path.join(env.workspace, "pivotTable"))
Example #24
def get_max_value_of_dup_seqid(duplicates, in_directory):
    out_table = in_directory + "\\summary_stats"
    arcpy.Statistics_analysis(duplicates, out_table, [["FEAT_SEQ", "MAX"]])

    max_val = 0
    with arcpy.da.SearchCursor(out_table, ["MAX_FEAT_SEQ"],
                               where_clause="OBJECTID = 1") as search_cur:
        for row in search_cur:
            max_val = row[0]
    return int(max_val)
Example #25
File: joins.py  Project: zonakre/gasp
def join_concelhos_contractos(tabela, folha, concelho, montante, concelhos):
    """
    Group By and Join
    """
    # Convert the table to dbf
    if not os.path.exists("C:\\areatrab"):
        os.mkdir("C:\\areatrab")
    arcpy.ExcelToTable_conversion(tabela, "C:\\areatrab\\tabela.dbf", folha)
    arcpy.Statistics_analysis(
        "C:\\areatrab\\tabela.dbf", "C:\\areatrab\\sum_concelho.dbf", [[montante, "SUM"]], concelho)
    arcpy.JoinField_management(concelhos, "FID", "C:\\areatrab\\sum_concelho.dbf", concelho, "")
Example #26
def vulnerabilite_rr(in_rr_layer, in_ras, vul_infra_route, id_vul_infra_route):
    # Select the segments touched by the hazard
    # inRRLayer = 'inRRLayer'
    # arcpy.MakeFeatureLayer_management(inRR, inRRLayer)
    # arcpy.SelectLayerByLocation_management(inRRLayer,"INTERSECT",maskZIl)

    # Copy the selected rr segments for the functionality analysis
    zi_rr2 = 'zi_rr3a'  # TODO: generate this name automatically
    arcpy.CopyFeatures_management(in_rr_layer, zi_rr2)
    arcpy.Densify_edit(zi_rr2, "DISTANCE", "10")

    # Convert the vertices to points
    arcpy.AddMessage('  Conversion des vertex en points')
    rr_pts = 'RRpts'  # TODO: generate this name automatically
    arcpy.FeatureVerticesToPoints_management(zi_rr2, rr_pts, 'ALL')

    # Extract the flood depth at the points
    arcpy.AddMessage('  Extraction de la profondeur de submersion')
    rr_pts_extract = 'RRptsExtract'  # TODO: generate this name automatically
    arcpy.sa.ExtractValuesToPoints(rr_pts, in_ras, rr_pts_extract)

    # Replace NoData values (-9999 or less) with 0
    rows = arcpy.da.UpdateCursor(rr_pts_extract, ["RASTERVALU"])
    for row in rows:
        if row[0] <= -9999:
            row[0] = 0
        rows.updateRow(row)
    del rows

    arcpy.AddMessage(
        '  Comparaison des profondeur de submersion avec les seuils')
    out_stats_table = 'stats'
    arcpy.Statistics_analysis(
        rr_pts_extract, out_stats_table, [["RASTERVALU", "MAX"]],
        [id_vul_infra_route, 'PRECISION']
    )  # MAX (positive flood depths) or MIN (negative depths) # , 'CRCC_NO_SEQ'

    for f in arcpy.ListFields(out_stats_table):
        if 'max' in f.name.lower():  # min or max
            maxfield = f.name
            break

    status_rr = {}

    rows = arcpy.da.SearchCursor(out_stats_table,
                                 [id_vul_infra_route, maxfield, "PRECISION"])
    for row in rows:
        # status_rr[route_id] = functionality_level[water depth, flooded/not flooded]
        # print row.getValue(IDvulInfraRou)
        status_rr[row[0]] = [
            row[1], find_class(row[1], vul_infra_route[row[2]])
        ]  # original note: '-' in front of "findClass( HERE row.getValue(maxfield)"  #[row.CRCC_NO_SEQ]
    del rows

    return status_rr
Example #27
def further_process_blended():
    env.workspace = outBlendedWS
    env.overwriteOutput = True
    GISDBASCL = r'S:\LV_Valley_Imagery\2017\SwimmingPool2017\gdb\general_data.gdb\GISDBA_SCL_STREETS'

    fcs = arcpy.ListFeatureClasses()
    arcpy.MakeFeatureLayer_management(projectAreaTiles, 'TileClipLayer')
    for fc in fcs:
        print('clipping ' + fc)
        arcpy.MakeFeatureLayer_management(fc, 'lyr')
        arcpy.AddField_management('lyr', 'YARD', 'TEXT', '', '', '5')
        arcpy.AddField_management('lyr', 'TILENAME', 'Text', '', '', '8')
        arcpy.AddField_management('lyr', 'ERROR_TYPE', 'SHORT')
        arcpy.SelectLayerByAttribute_management(
            'TileClipLayer', 'NEW_SELECTION', "BOOKSEC_PT = 'o" + fc[4:] + "'")

        arcpy.Clip_analysis(fc, 'TileClipLayer',
                            outClippedBlendedWS + '\\' + fc + '_Clip')
        arcpy.SelectLayerByAttribute_management('TileClipLayer',
                                                'CLEAR_SELECTION')

    env.workspace = outClippedBlendedWS
    env.overwriteOutput = True

    fcs = arcpy.ListFeatureClasses()
    arcpy.MakeFeatureLayer_management(projectAreaParcels, 'ProjAreaAOXLyr')
    arcpy.MakeFeatureLayer_management(GISDBASCL, 'GISDBA_SCL_STREETS')
    for fc in fcs:
        print "Performing Identity and Near Analysis on " + fc + "_Id"
        arcpy.Identity_analysis(fc, 'ProjAreaAOXLyr',
                                outClippedBlendedIDWS + '\\' + fc + '_Id',
                                'ALL', '', 'NO_RELATIONSHIPS')
        arcpy.Near_analysis(outClippedBlendedIDWS + '\\' + fc + '_Id',
                            'GISDBA_SCL_STREETS', "300 Feet", "LOCATION",
                            "NO_ANGLE", "PLANAR")

    env.workspace = outClippedBlendedIDWS
    env.overwriteOutput = True
    arcpy.MakeFeatureLayer_management(GISDBASCL, 'GISDBA_SCL_STREETS')
    fcs = arcpy.ListFeatureClasses()
    for fc in fcs:
        print "calculating frequency and stats on " + fc
        arcpy.MakeFeatureLayer_management(fc, 'lyr')
        arcpy.AddJoin_management('lyr', "NEAR_FID", 'GISDBA_SCL_STREETS',
                                 'OBJECTID', 'KEEP_ALL')
        arcpy.Frequency_analysis(
            'lyr', outClippedBlendedIDWS + '\\' + fc[:-8] + '_Frequen',
            '"{}.gridcode;{}.APN"'.format(fc,
                                          fc), '"{}.Shape_Area"'.format(fc))

        arcpy.Statistics_analysis(
            outClippedBlendedIDWS + '\\' + fc[:-8] + '_Frequen',
            outClippedBlendedIDWS + '\\' + fc[:-8] + '_TOTAREA',
            "FREQUENCY COUNT;" + "{i}_Shape_Area SUM".format(i=fc),
            "{x}_APN".format(x=fc))
def calculate_affected_indexes_layer(AOI, index_polygon):
    # Find the indexed polygons that overlap the AOI
    affected_indexes_fc = "in_memory/base_affected_indexes"
    AOI_lyr = arcpy.MakeFeatureLayer_management(AOI, "AOI_lyr")
    index_polygon_lyr = arcpy.MakeFeatureLayer_management(
        index_polygon, "index_polygon_lyr")
    index_polygon_lyr.visible = False
    AOI_lyr.visible = False
    arcpy.SelectLayerByLocation_management(index_polygon_lyr, 'INTERSECT',
                                           AOI_lyr)
    arcpy.Statistics_analysis(index_polygon_lyr, "in_memory/statistics_LOD",
                              [["LOD", "MAX"]])
    statistics_LOD = [
        row[0] for row in arcpy.da.SearchCursor("in_memory/statistics_LOD",
                                                ['MAX_LOD'])
    ]
    max_lod = statistics_LOD[0]
    arcpy.AddMessage("- Max affected LOD detected " + str(max_lod))
    arcpy.CopyFeatures_management(index_polygon_lyr, affected_indexes_fc)
    arcpy.SelectLayerByAttribute_management(index_polygon_lyr,
                                            'CLEAR_SELECTION')
    base_affected_indexes_lyr = arcpy.MakeFeatureLayer_management(
        affected_indexes_fc, "base_affected_indexes_lyr")

    # Calculating bundle extent in each level
    timeStamp = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
    bundle_index_polygons = 'partial_index_polygons_' + str(timeStamp) + '.shp'
    arcpy.Select_analysis(index_polygon, bundle_index_polygons, '"LOD"<7')
    arcpy.AddMessage("- Partial vector indexes Level 1-6 Created !")
    with arcpy.da.SearchCursor(affected_indexes_fc,
                               ['SHAPE@', 'LOD']) as cursor:
        for row in cursor:
            if row[1] <= max_lod and row[1] >= 7:
                bundle_lod = row[1] - 7
                arcpy.SelectLayerByAttribute_management(
                    base_affected_indexes_lyr, 'NEW_SELECTION',
                    '"LOD" = ' + str(bundle_lod))
                arcpy.SelectLayerByAttribute_management(
                    index_polygon_lyr, 'NEW_SELECTION',
                    '"LOD" = ' + str(row[1]))
                arcpy.SelectLayerByLocation_management(
                    index_polygon_lyr, "INTERSECT", base_affected_indexes_lyr,
                    None, "SUBSET_SELECTION", "NOT_INVERT")
                arcpy.CopyFeatures_management(
                    index_polygon_lyr,
                    "in_memory/bundle_index_polygon_each_level")
                arcpy.Append_management(
                    ["in_memory/bundle_index_polygon_each_level"],
                    bundle_index_polygons, "TEST")
                arcpy.AddMessage(
                    "- Partial vector indexes Level {0} Created !".format(
                        row[1]))

    return bundle_index_polygons
Example #29
    def create_summary_stats(self, fc_name, summary_field_name):
        self.fc_name = fc_name
        self.summary_field_name = summary_field_name

        # overwrite output
        arcpy.env.overwriteOutput = True

        # Local variables:
        input_fc = "Database Servers\\FN27399_SQLEXPRESS2.gds\\HCFCD (VERSION:dbo.DEFAULT)" \
                   "\\HCFCD.DBO." + self.fc_name

        basins_GCDNA1983 = os.getcwd() + r'\Data\basins_GCDNA1983.shp'

        input_fc_GCSNA83 = "Database Servers\\FN27399_SQLEXPRESS2.gds\\HCFCD (VERSION:dbo.DEFAULT)" \
                           "\\HCFCD.DBO.Current_Clip_GCSNA83"

        Points_Basin_Join_shp = os.getcwd() + r'\Data\Points_Basin_Join.shp'

        Sum_Output_dbf = os.getcwd() + r'\Data\Sum_Output.dbf'

        try:
            # Process: Project
            print "Starting Summary_Statistics..."
            print "Projecting points..."
            arcpy.Project_management(
                input_fc, input_fc_GCSNA83,
                "GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',"
                "SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],"
                "UNIT['Degree',0.0174532925199433],METADATA['North America - NAD83',"
                "167.65,14.93,-47.74,86.45,0.0,0.0174532925199433,0.0,1350]]",
                "",
                "GEOGCS['HRAP_Sphere',DATUM['<custom>',SPHEROID['<custom>',"
                "6371200.0,0.0]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]"
            )

            # Process: Spatial Join
            print "Spatial Join (This can take a while: please be patient)..."
            arcpy.SpatialJoin_analysis(input_fc_GCSNA83, basins_GCDNA1983,
                                       Points_Basin_Join_shp,
                                       "JOIN_ONE_TO_ONE", "KEEP_ALL")

            # Process: Summary Statistics
            arcpy.Statistics_analysis(Points_Basin_Join_shp, Sum_Output_dbf,
                                      self.summary_field_name + " MEAN",
                                      "BASIN_NAME")
            self.run_status = 'OK'
        except Exception as e:
            self.run_status = e
            root = tk.Tk()
            # allows tkMessageBox to be shown without displaying Tkinter root window
            root.withdraw()
            tkMessageBox.showinfo("Python error", e)
            print "Summary_Statistic_NET.py has errors and did not complete correctly"
        return self.run_status
Example #30
def getLargestNodeNumber():
    """
    Get the highest node number in the current network.
    """
    arcpy.env.workspace = WORKING_GDB
    arcpy.env.overwriteOutput = True
    arcpy.Statistics_analysis(NETWORK_SHAPEFILE, 'node_number_stats',
                              [['A', 'MAX'], ['B', 'MAX']], '')
    rows = arcpy.SearchCursor('node_number_stats')
    for row in rows:  # only one row
        max_node = max(row.MAX_A, row.MAX_B)
    return max_node