Code Example #1
    # Variables used to track good/bad PR and PMT link counts, duplicates, and results
    PR_bad_count = 0
    PR_good_count = 0
    PMT_good_count = 0
    PMT_bad_count = 0
    ftrcntprog = 0
    duplicate_counter = 0
    uncorrected_counter = 0
    directory_dict = {}
    duplicate_dict = {}
    uncorrected_links = []
    duplicate_directories = []
    good_link_dict = {}

    # Create table views of the feature classes (a view references the data; nothing is copied)
    parcel_table = arcpy.MakeTableView_management(parcel_FC, 'parcel_table')
    landties_table = arcpy.MakeTableView_management(landties_FC, 'landties_table')

    landties_count = 0
    parcel_count = 0
    with arcpy.da.SearchCursor(landties_table, ['PR_LINK']) as cursor:
        for row in cursor:
            landties_count += 1
    with arcpy.da.SearchCursor(landties_table, ['PMT_LINK']) as cursor:
        for row in cursor:
            landties_count += 1
    with arcpy.da.SearchCursor(parcel_table, ['PR_LINK']) as cursor:
        for row in cursor:
            parcel_count += 1
    with arcpy.da.SearchCursor(parcel_table, ['PMT_LINK']) as cursor:
        for row in cursor:
            parcel_count += 1
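Each cursor above exists only to count rows, which is slow on large tables. A minimal alternative sketch using the same table views (the doubled totals mirror the two per-field cursor passes in the original):

    # Sketch: GetCount_management returns the row count without iterating.
    landties_rows = int(arcpy.GetCount_management('landties_table').getOutput(0))
    parcel_rows = int(arcpy.GetCount_management('parcel_table').getOutput(0))
    landties_count = landties_rows * 2  # PR_LINK pass + PMT_LINK pass
    parcel_count = parcel_rows * 2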
Code Example #2
File: CCLMapDataProcessing.py Project: KDOTGIS/pydot

if __name__ == '__main__':
    pass
import arcpy, datetime, os

ws = r'\\gisdata\arcgis\GISdata\KDOT\BTP\Projects\CCL'  #change this path from CANSYSTEST
CCLUpdate = "Lyons"
now = datetime.datetime.now()
tempdb = CCLUpdate + str(now.year) + "_" + str(now.month) + "_" + str(
    now.day) + ".gdb"
arcpy.env.workspace = ws
wsouttbl = ws + "\\" + tempdb
mxd = arcpy.mapping.MapDocument("CURRENT")
df = arcpy.mapping.ListDataFrames(mxd, "Layers")[0]
arcpy.MakeTableView_management(r"Database Connections\SDEDEV.sde\SDE.CCL_Lane",
                               "Lane_tview")
arcpy.MakeTableView_management(
    r"Database Connections\SDEDEV.sde\SDE.Maint_Segment", "Maint_tview")
routelyr = r"kdot_sde:oracle10g:SDE.KDOT_ROADWAYS\SDE.CMLRS"  # raw string so the backslash is literal
clim = arcpy.mapping.Layer(r"\\gisdata\arcgis\GISdata\Layers\City Limits.lyr")
countylrs = arcpy.mapping.Layer(
    r"\\gisdata\arcgis\GISdata\Layers\County Linear Reference System.lyr")

arcpy.mapping.AddLayer(df, clim)
arcpy.mapping.AddLayer(df, countylrs)
clrs = "County Linear Reference System"
smlrs = r"Database Connections\SDEDEV.sde\SDE.KDOT_ROADWAY\SDE.SMLRS"
arcpy.DisconnectUser("Database Connections\\kdot_sde.sde", "ALL")

arcpy.LocateFeaturesAlongRoutes_lr(
    "City Limits", "County Linear Reference System", "LRS_KEY", "0 Feet",
Code Example #3
        for row in cursor:
            loc_two_name = row[0]
            loc_two_filename = row[1]
            print('Calculating path distance and backlink raster for site: ' + loc_two_name)
            arcpy.MakeFeatureLayer_management(fc_two, 'source',
                                              '"{}" = \'{}\''.format(fc_two_loc_filename, loc_two_filename))
            pd_raster = path_distance('source', digital_elevation_model, vertical_factor, loc_two_filename)
            in_cost_backlink_raster = directory + r'\backlink\bl_' + loc_two_filename

            with arcpy.da.SearchCursor(fc_one, [fc_one_loc_name, fc_one_loc_filename]) as cursor:
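                # Note (added): the inner `with ... as cursor` reuses the outer
                # cursor's name; the outer for-loop keeps iterating because it
                # holds its own iterator, but a distinct name would be clearer.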
                for row in cursor:
                    start_subtime = time()
                    loc_one_name = row[0]
                    loc_one_filename = row[1]
                    arcpy.MakeFeatureLayer_management(fc_one, 'destination', '"{}" = \'{}\''.format(fc_one_loc_filename,
                                                                                                    loc_one_filename))
                    out_cost_path = cost_path('destination', pd_raster, in_cost_backlink_raster)
                    convert(out_cost_path, loc_two_filename, loc_one_filename, loc_two_name, loc_one_name)
                    end_subtime = time()
                    subtime = end_subtime - start_subtime
                    print('Finished generating least cost path between ' + loc_two_name + ' and ' + loc_one_name +
                          ' in ' + str(subtime) + ' seconds.')

arcpy.MakeTableView_management(table, 'tableview')
arcpy.TableToExcel_conversion('tableview', directory + r'\master.xls')

log.close()
end_time = time()
print(end_time)
time_taken = end_time - start_time  # time_taken is in seconds
print('Script took ' + str(time_taken) + ' seconds to complete.')
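A side note on the where clauses built in this example: quoting field names by hand ('"{}" = ...') only matches some workspace types (file geodatabases and enterprise databases delimit fields differently from shapefiles). arcpy.AddFieldDelimiters picks the correct delimiter for the data source; a minimal sketch, assuming fc_two, fc_two_loc_filename, and loc_two_filename as in the example:

    # Sketch: build the definition query with workspace-appropriate delimiters.
    field = arcpy.AddFieldDelimiters(fc_two, fc_two_loc_filename)
    where = "{} = '{}'".format(field, loc_two_filename)
    arcpy.MakeFeatureLayer_management(fc_two, 'source', where)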
Code Example #4
File: Schools.py Project: waternk/EnviroAtlas_JSApp
def Schools(city, inDir, workFld):
    import traceback, time, arcpy, os
    from arcpy import env
    arcpy.CheckOutExtension('Spatial')

    #-------- DIRECTORY SETUP ------------------------------------------------
    """ Working Directory """
    try:
        arcpy.CreateFileGDB_management(str(workFld), str(city) + '_EduPts.gdb')
    except:
        print 'Schools GDB already exists'
    workDir = str(workFld) + '/' + city + '_EduPts.gdb'
    arcpy.env.workspace = workDir
    """ Report File Directory """
    reportfileDir = str(workFld) + '/Logs'
    """ Frequent Directory """
    freqDir = str(workFld) + '/' + city + '_Freq.gdb'
    """ Final Geodatabase """
    finalDir = str(workFld) + '/' + city + '_Final.gdb'
    """ Projection File Directory """
    prjDir = str(inDir) + '/Prj'
    """ Input Directory """
    inDir = str(inDir) + '/Input.gdb'
    """ Set Workspace Environments """
    arcpy.env.workspace = workDir
    arcpy.env.scratchWorkspace = str(inDir) + '/Scratch.gdb'
    arcpy.env.overwriteOutput = True

    #-----------------------------------------------------------------------------
    # BEGIN ANALYSIS
    #-----------------------------------------------------------------------------
    try:
        #-------- LOGFILE CREATION ---------------------------------------------
        """ Create report file for each metric """
        tmpName = city + '_EduLowGS_' + time.strftime('%Y%m%d_%H-%M')
        reportfileName = reportfileDir + '/' + tmpName + '.txt'
        reportFile = open(reportfileName, 'w')

        try:
            loglist = sorted(f for f in os.listdir(reportfileDir)
                             if f.startswith(str(city) + '_Reuse'))
            tmpName = loglist[-1]
        except:
            tmpName = city + '_Reuse_' + time.strftime('%Y%m%d_%H-%M') + '.txt'
        reportfileName = reportfileDir + '/' + tmpName

        try:
            ReuseRF = open(reportfileName, 'a')
        except:
            ReuseRF = open(reportfileName, 'w')
            print 'Creating Reuse Log'
        """ Write out first line of report file """
        print 'Schools Start Time: ' + time.asctime()
        state = city[-2:]
        if state in ['CO', 'IL', 'KS', 'MT', 'NE', 'NH', 'OR', 'WA']:
            reportFile.write(
                "Begin with 2011 HSIP (Homeland Security Infrastructure Program) point layers for public schools, and private schools where public and private schools have been merged into one K-12 layer. Also, begin with the 2014 HSIP day cares point layer.--201203--\n"
            )
        else:
            reportFile.write(
                "Begin with 2011 HSIP (Homeland Security Infrastructure Program) point layers for daycares, public schools, and private schools where public and private schools have been merged into one K-12 layer.--201203--\n"
            )
        reportFile.write(
            "Use spatial join to add the Census Block Group GeoID of each school to the school's attribute record--201203--\n"
        )

        #-------- PROCESSING LAYERS ----------------------------------------------
        """ Set Environments """
        arcpy.env.extent = freqDir + '/LC'
        arcpy.env.snapRaster = freqDir + '/LC'
        """-------- Prepare Daycare and K12 Points -------------------------------"""
        """ Clip the Daycare and K12 points to the city boundary """
        arcpy.Clip_analysis(inDir + '/Daycares', freqDir + '/Bnd_5km',
                            'Daycares_Alb')
        arcpy.Clip_analysis(inDir + '/K12', freqDir + '/Bnd_5km', 'K12_Alb')
        reportFile.write(
            "Clip each point layer to the EnviroAtlas community boundary.--" +
            time.strftime('%Y%m%d--%H%M%S') + '--\n')
        """ Determine the Projection of the LC """
        descLC = arcpy.Describe(str(freqDir) + '/LC')
        """ Project the Daycare and K12 points into the LC's projection """
        arcpy.Project_management('Daycares_Alb', 'Daycares',
                                 descLC.spatialReference)
        arcpy.Project_management('K12_Alb', 'K12', descLC.spatialReference)
        reportFile.write(
            "Project each point layer into the projection of the land cover.--"
            + time.strftime('%Y%m%d--%H%M%S') + '--\n')
        """-------- Prepare Land Cover -------------------------------------------"""
        """ Reclassify LC into Binary Green Space """
        if arcpy.Exists(str(freqDir) + '/GreenIO') == False:
            outReclass5 = arcpy.sa.Reclassify(
                str(freqDir) + '/LC', 'Value',
                arcpy.sa.RemapValue([[0, 0], [10, 0], [20, 0], [21,
                                                                0], [22, 0],
                                     [30, 0], [40, 1], [52, 1], [70, 1],
                                     [80, 1], [82, 1], [91, 1], [92, 1]]))
            outReclass5.save(str(freqDir) + '/GreenIO')
            reportFile.write(
                "Reclassify the 1-Meter EnviroAtlas Land Cover Classification for the EnviroAtlas community into Binary Green Space. REPLACE-GSE--"
                + time.strftime('%Y%m%d--%H%M%S') + '--\n')
            ReuseRF.write("GreenIO--" + time.strftime('%Y%m%d--%H%M%S') +
                          '--\n')

        else:
            reportFile.write(
                "Reclassify the 1-Meter EnviroAtlas Land Cover Classification for the EnviroAtlas community into Binary Green Space. REPLACE-GSE--GreenIO--"
                + '--\n')
        """ Moving Window for Schools - Greenspace, Circle 100 Meters """
        outFocalStat1 = arcpy.sa.FocalStatistics(
            str(freqDir) + '/GreenIO', arcpy.sa.NbrCircle(100, 'CELL'), 'SUM',
            'NODATA')
        outFocalStat1.save('Gre_100C')
        reportFile.write(
            "Run Focal Statistics on the Green Space Binary Raster with a circular window of 100 meters and statistics = SUM.--"
            + time.strftime('%Y%m%d--%H%M%S') + '--\n')
        """-------- Analyze Green Space at School Locations -------------------"""
        """ Extract GS Values at Points """
        arcpy.sa.ExtractValuesToPoints('Daycares', 'Gre_100C', 'Day_Green',
                                       'NONE', 'VALUE_ONLY')
        arcpy.sa.ExtractValuesToPoints('K12', 'Gre_100C', 'K12_Green', 'NONE',
                                       'VALUE_ONLY')
        reportFile.write(
            "Extract Values to Points from the focal statistics raster to both the Daycare and K12 points with Census Block Group GeoIDs and append values to the point file--"
            + time.strftime('%Y%m%d--%H%M%S') + '--\n')
        """ Add Field to Point Layers """
        arcpy.AddField_management('Day_Green', 'Green_Pct', 'DOUBLE')
        arcpy.AddField_management('K12_Green', 'Green_Pct', 'DOUBLE')
        """ Calculate Percent Greenspce """
        arcpy.CalculateField_management('Day_Green', 'Green_Pct',
                                        'float(!RASTERVALU!) /31417 *100',
                                        'PYTHON_9.3')
        arcpy.CalculateField_management('K12_Green', 'Green_Pct',
                                        'float(!RASTERVALU!) /31417 *100',
                                        'PYTHON_9.3')
        reportFile.write(
            "Add new field to each point layer: Green_Pct (float) and calculate where Green_Pct = RASTERVALU / 31417 * 100 (limited to 2 decimal places).--"
            + time.strftime('%Y%m%d--%H%M%S') + '--\n')
        """ Count number of Schools per Block Group """
        arcpy.Statistics_analysis('Day_Green', 'Day_Num',
                                  [['CAPACITY', 'COUNT']], 'bgrp')
        arcpy.Statistics_analysis('K12_Green', 'K12_Num',
                                  [['ENROLLMENT', 'COUNT']], 'bgrp')
        """ Select low Greespace Schools and Count per Block Group """
        arcpy.Select_analysis('Day_Green', 'Day_Low', 'Green_Pct <= 25')
        arcpy.Statistics_analysis('Day_Low', 'Day_NumLow',
                                  [['CAPACITY', 'COUNT']], 'bgrp')

        arcpy.Select_analysis('K12_Green', 'K12_Low', 'Green_Pct <= 25')
        arcpy.Statistics_analysis('K12_Low', 'K12_NumLow',
                                  [['ENROLLMENT', 'COUNT']], 'bgrp')
        reportFile.write(
            "From each point layer, select records with Green_Pct <= 25, then summarize the count of selected schools by block group.--"
            + time.strftime('%Y%m%d--%H%M%S') + '--\n')
        """ Create final table """
        arcpy.TableToTable_conversion(freqDir + '/BG', workDir, 'EduPts', '',
                                      'bgrp')
        arcpy.DeleteField_management('EduPts', [
            'PLx2_Pop', 'PLx2_Pct', 'SUM_HOUSIN', 'SUM_POP10', 'under_1',
            'under_1pct', 'under_13', 'under_13pc', 'over_70', 'over_70pct',
            'Shape_Length', 'Shape_Leng', 'NonWhite', 'NonWt_Pct',
            'Shape_Le_1', 'Shape_Area', 'Density', 'LandA_M', 'EAID',
            'Dasy_Pop', 'State'
        ])
        reportFile.write(
            "Create a new table based on the EnviroAtlas community block groups table retaining the BGRP field.--"
            + time.strftime('%Y%m%d--%H%M%S') + '--\n')
        """ Add fields to new table """
        arcpy.AddField_management('EduPts', 'Day_Count', 'DOUBLE')
        arcpy.AddField_management('EduPts', 'Day_Low', 'DOUBLE')
        arcpy.AddField_management('EduPts', 'K12_Count', 'DOUBLE')
        arcpy.AddField_management('EduPts', 'K12_Low', 'DOUBLE')
        reportFile.write(
            "Add fields to the new table for K12_Count (short), K12_Low (short), Day_Count (short), and Day_Low (short).--"
            + time.strftime('%Y%m%d--%H%M%S') + '--\n')
        """ Join Each Table to the final table and calculate necessary records """
        arcpy.JoinField_management('EduPts', 'bgrp', 'Day_Num', 'bgrp',
                                   ['FREQUENCY'])
        arcpy.CalculateField_management('EduPts', 'Day_Count', '!FREQUENCY!',
                                        'PYTHON')
        arcpy.DeleteField_management('EduPts', 'FREQUENCY')

        arcpy.JoinField_management('EduPts', 'bgrp', 'Day_NumLow', 'bgrp',
                                   ['FREQUENCY'])
        arcpy.CalculateField_management('EduPts', 'Day_Low', '!FREQUENCY!',
                                        'PYTHON')
        arcpy.DeleteField_management('EduPts', 'FREQUENCY')

        arcpy.JoinField_management('EduPts', 'bgrp', 'K12_Num', 'bgrp',
                                   ['FREQUENCY'])
        arcpy.CalculateField_management('EduPts', 'K12_Count', '!FREQUENCY!',
                                        'PYTHON')
        arcpy.DeleteField_management('EduPts', 'FREQUENCY')

        arcpy.JoinField_management('EduPts', 'bgrp', 'K12_NumLow', 'bgrp',
                                   ['FREQUENCY'])
        arcpy.CalculateField_management('EduPts', 'K12_Low', '!FREQUENCY!',
                                        'PYTHON')
        arcpy.DeleteField_management('EduPts', 'FREQUENCY')
        reportFile.write(
            "Join each of the summarized tables with the new table and calculate the corresponding field in the new table.--"
            + time.strftime('%Y%m%d--%H%M%S') + '--\n')
        """ Calculate NULL values, where applicable """
        arcpy.MakeTableView_management('EduPts', 'EduPtsTbl')
        arcpy.SelectLayerByAttribute_management('EduPtsTbl', 'NEW_SELECTION',
                                                'Day_Count IS NULL')
        arcpy.CalculateField_management('EduPtsTbl', 'Day_Count', '0',
                                        'PYTHON_9.3')
        arcpy.CalculateField_management('EduPtsTbl', 'Day_Low', '-99999',
                                        'PYTHON_9.3')
        arcpy.SelectLayerByAttribute_management('EduPtsTbl', 'CLEAR_SELECTION')
        arcpy.SelectLayerByAttribute_management('EduPtsTbl', 'NEW_SELECTION',
                                                'Day_Low IS NULL')
        arcpy.CalculateField_management('EduPtsTbl', 'Day_Low', '0',
                                        'PYTHON_9.3')
        arcpy.SelectLayerByAttribute_management('EduPtsTbl', 'CLEAR_SELECTION')
        arcpy.SelectLayerByAttribute_management('EduPtsTbl', 'NEW_SELECTION',
                                                'K12_Count IS NULL')
        arcpy.CalculateField_management('EduPtsTbl', 'K12_Count', '0',
                                        'PYTHON_9.3')
        arcpy.CalculateField_management('EduPtsTbl', 'K12_Low', '-99999',
                                        'PYTHON_9.3')
        arcpy.SelectLayerByAttribute_management('EduPtsTbl', 'CLEAR_SELECTION')
        arcpy.SelectLayerByAttribute_management('EduPtsTbl', 'NEW_SELECTION',
                                                'K12_Low IS NULL')
        arcpy.CalculateField_management('EduPtsTbl', 'K12_Low', '0',
                                        'PYTHON_9.3')
        arcpy.SelectLayerByAttribute_management('EduPtsTbl', 'CLEAR_SELECTION')
        reportFile.write(
            "Calculate fields where K12_Count = 0: K12_Low = -99999 and Day_Count = 0: Day_Low = -99999--"
            + time.strftime('%Y%m%d--%H%M%S') + '--\n')
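        # Hypothetical alternative (not in the original): one da.UpdateCursor pass
        # could apply the same NULL-filling rules in a single sweep:
        #     with arcpy.da.UpdateCursor('EduPts', ['Day_Count', 'Day_Low',
        #                                           'K12_Count', 'K12_Low']) as cur:
        #         for dc, dl, kc, kl in cur:
        #             if dc is None: dc, dl = 0, -99999
        #             if dl is None: dl = 0
        #             if kc is None: kc, kl = 0, -99999
        #             if kl is None: kl = 0
        #             cur.updateRow((dc, dl, kc, kl))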
        """-------- Check that the Analysis Area is covered by the LC -------------- """
        """ Create a Polygon Version of the LC """
        if arcpy.Exists(freqDir + '/LC_Poly') == False:
            arcpy.env.extent = freqDir + '/LC'
            arcpy.env.snapRaster = freqDir + '/LC'
            ReC = arcpy.sa.Reclassify(
                str(freqDir) + '/LC', 'Value',
                arcpy.sa.RemapValue([[0, 0], [10, 1], [20, 1], [21,
                                                                1], [22, 1],
                                     [30, 1], [40, 1], [52, 1], [70, 1],
                                     [80, 1], [82, 1], [91, 1], [92, 1]]))
            ReC.save(str(freqDir) + '/AreaIO')
            arcpy.RasterToPolygon_conversion(
                str(freqDir) + '/AreaIO',
                str(freqDir) + '/LC_Poly', 'SIMPLIFY')
            arcpy.EliminatePolygonPart_management(
                str(freqDir) + '/LC_Poly',
                str(freqDir) + '/LC_Poly_EP', 'PERCENT', '', '5',
                'CONTAINED_ONLY')
            arcpy.Delete_management(str(freqDir) + '/LC_Poly')
            arcpy.Rename_management(
                str(freqDir) + '/LC_Poly_EP',
                str(freqDir) + '/LC_Poly')
        """ Buffer the LC Polygon by -500m """
        if arcpy.Exists(freqDir + '/Bnd_Cty_500m') == False:
            arcpy.Buffer_analysis(
                str(freqDir) + '/Bnd_Cty',
                str(freqDir) + '/Bnd_Cty_500m', '500 meters')
            arcpy.EliminatePolygonPart_management(
                str(freqDir) + '/Bnd_Cty_500m',
                str(freqDir) + '/Bnd_Cty_500m_EP', 'PERCENT', '', '30',
                'CONTAINED_ONLY')
            arcpy.Delete_management(str(freqDir) + '/Bnd_Cty_500m')
            arcpy.Rename_management(
                str(freqDir) + '/Bnd_Cty_500m_EP',
                str(freqDir) + '/Bnd_Cty_500m')
        """ Identify whether LC is large enough """
        arcpy.MakeFeatureLayer_management(str(freqDir) + '/LC_Poly', 'LClyr')
        arcpy.MakeFeatureLayer_management(
            str(freqDir) + '/Bnd_Cty_500m', 'BC_500lyr')

        arcpy.SelectLayerByLocation_management('BC_500lyr',
                                               'COMPLETELY_WITHIN', 'LClyr',
                                               '', 'NEW_SELECTION')
        bigEnough = float(arcpy.GetCount_management('BC_500lyr').getOutput(0))
        arcpy.SelectLayerByAttribute_management('BC_500lyr', 'CLEAR_SELECTION')
        """ If the LC isn't large enough, edit erroneous BGS """
        if bigEnough == 0:
            """ Identify BGs within 50m of the LC edge """
            arcpy.Buffer_analysis(
                str(freqDir) + '/LC_Poly', 'LC_Poly_Minus100', '-100 meters')
            arcpy.MakeFeatureLayer_management('LC_Poly_Minus100', 'Minus100')
            arcpy.MakeFeatureLayer_management('Day_Low', 'D_L')
            arcpy.MakeFeatureLayer_management('K12_Low', 'K_L')

            arcpy.SelectLayerByLocation_management('D_L', 'WITHIN', 'Minus100',
                                                   '', 'NEW_SELECTION',
                                                   'INVERT')
            arcpy.SelectLayerByLocation_management('K_L', 'WITHIN', 'Minus100',
                                                   '', 'NEW_SELECTION',
                                                   'INVERT')

            dValue = float(arcpy.GetCount_management('D_L').getOutput(0))
            kValue = float(arcpy.GetCount_management('K_L').getOutput(0))
            """ For all BGs too close to the LC edge, assign both fields a value of -99998 """
            if dValue > 0:
                bgrps = []
                cursor = arcpy.SearchCursor('D_L')
                for row in cursor:
                    value = row.getValue('bgrp')
                    bgrps.append(value)
                bgrps = list(set(bgrps))
                expression = ''
                for bgrp in bgrps:
                    expression = expression + " OR bgrp = '" + str(bgrp) + "'"
                expression = expression[4:]
                arcpy.SelectLayerByAttribute_management(
                    'EduPtsTbl', 'NEW_SELECTION', expression)
                arcpy.CalculateField_management('EduPtsTbl', 'Day_Low',
                                                '-99998', 'PYTHON_9.3')
                arcpy.SelectLayerByAttribute_management(
                    'EduPtsTbl', 'CLEAR_SELECTION')

            if kValue > 0:
                bgrps = []
                cursor = arcpy.SearchCursor('K_L')
                for row in cursor:
                    value = row.getValue('bgrp')
                    bgrps.append(value)
                bgrps = list(set(bgrps))
                expression = ''
                for bgrp in bgrps:
                    expression = expression + " OR bgrp = '" + str(bgrp) + "'"
                expression = expression[4:]
                arcpy.SelectLayerByAttribute_management(
                    'EduPtsTbl', 'NEW_SELECTION', expression)
                arcpy.CalculateField_management('EduPtsTbl', 'K12_Low',
                                                '-99998', 'PYTHON_9.3')
                arcpy.SelectLayerByAttribute_management(
                    'EduPtsTbl', 'CLEAR_SELECTION')
            arcpy.SelectLayerByAttribute_management('D_L', 'CLEAR_SELECTION')
            arcpy.SelectLayerByAttribute_management('K_L', 'CLEAR_SELECTION')

            if kValue > 0 or dValue > 0:
                reportFile.write(
                    "Calculate Field for BGs within 50m of the edge of the land cover, All Fields = -99998.--"
                    + time.strftime('%Y%m%d--%H%M%S') + '--\n')
        """ Create final table """
        arcpy.CopyRows_management('EduPtsTbl', 'EduLowGS')
        try:
            arcpy.Delete_management(finalDir + '/' + str(city) + '_EduLowGS')
        except:
            pass
        arcpy.TableToTable_conversion('EduLowGS', finalDir, city + '_EduLowGS')
        allFields = [
            f.name
            for f in arcpy.ListFields(finalDir + '/' + city + '_EduLowGS')
        ]
        for field in allFields:
            if field not in [
                    'bgrp', 'OBJECTID', 'Day_Count', 'Day_Low', 'K12_Count',
                    'K12_Low'
            ]:
                arcpy.DeleteField_management(
                    finalDir + '/' + city + '_EduLowGS', [field])

        reportFile.write(
            "Export the fields to be displayed in EnviroAtlas to a final gdb table: K12_Count, K12_Low, Day_Count, Day_Low.--"
            + time.strftime('%Y%m%d--%H%M%S') + '--\n')

        print 'Schools End Time: ' + time.asctime() + '\n'

        #-------- COMPLETE LOGFILES ----------------------------------------------
        reportFile.close()
        ReuseRF.close()

#-----------------------------------------------------------------------------
# END ANALYSIS
#-----------------------------------------------------------------------------
    except:
        """ This part of the script executes if anything went wrong in the main script above """
        #-------- PRINT ERRORS ---------------------------------------------------
        print "\nSomething went wrong.\n\n"
        print "Python Traceback Message below:"
        print traceback.format_exc()
        print "\nArcMap Error Messages below:"
        print arcpy.GetMessages(2)
        print "\nArcMap Warning Messages below:"
        print arcpy.GetMessages(1)

        #-------- COMPLETE LOGFILE ------------------------------------------------
        reportFile.write("\nSomething went wrong.\n\n")
        reportFile.write("Pyton Traceback Message below:")
        reportFile.write(traceback.format_exc())
        reportFile.write("\nArcMap Error Messages below:")
        reportFile.write(arcpy.GetMessages(2))
        reportFile.write("\nArcMap Warning Messages below:")
        reportFile.write(arcpy.GetMessages(1))

        reportFile.write("\n\nEnded at " + time.asctime() + '\n')
        reportFile.write("\n---End of Log File---\n")

        if reportFile:
            reportFile.close()
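The four FREQUENCY join/calculate/delete blocks in the middle of this example repeat a single pattern. A hypothetical helper (not part of the original) could collapse them; the table and field names below match the example:

    # Sketch: join a block-group summary table, copy its FREQUENCY value into the
    # target field, then remove the temporary join field.
    def transfer_frequency(target, join_table, field):
        arcpy.JoinField_management(target, 'bgrp', join_table, 'bgrp', ['FREQUENCY'])
        arcpy.CalculateField_management(target, field, '!FREQUENCY!', 'PYTHON')
        arcpy.DeleteField_management(target, 'FREQUENCY')

    for tbl, fld in [('Day_Num', 'Day_Count'), ('Day_NumLow', 'Day_Low'),
                     ('K12_Num', 'K12_Count'), ('K12_NumLow', 'K12_Low')]:
        transfer_frequency('EduPts', tbl, fld)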
Code Example #5
def worker(data_path, esri_service=False):
    """The worker function to index feature data and tabular data."""
    if esri_service:
        index_service(job.service_connection)
    else:
        job.connect_to_zmq()
        geo = {}
        entry = {}
        schema = {}
        dsc = arcpy.Describe(data_path)

        try:
            from utils import worker_utils
            geometry_ops = worker_utils.GeometryOps()
        except ImportError:
            geometry_ops = None

        try:
            global_id_field = dsc.globalIDFieldName
        except AttributeError:
            global_id_field = None

        try:
            shape_field_name = dsc.shapeFieldName
        except AttributeError:
            shape_field_name = None
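        # Equivalent shorthand for the two lookups above (a sketch, assuming dsc
        # is an arcpy Describe result):
        #     global_id_field = getattr(dsc, 'globalIDFieldName', None)
        #     shape_field_name = getattr(dsc, 'shapeFieldName', None)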

        # Get the table schema.
        table_entry = {}
        schema['name'] = dsc.name
        try:
            alias = dsc.aliasName
        except AttributeError:
            alias = dsc.name
        if not dsc.name == alias:
            schema['alias'] = alias
        schema['OIDFieldName'] = dsc.OIDFieldName
        if shape_field_name:
            schema['shapeFieldName'] = shape_field_name
            schema['wkid'] = dsc.spatialReference.factoryCode
        if global_id_field:
            schema['globalIDField'] = global_id_field
        schema_fields = []
        for fld in dsc.fields:
            field = {}
            props = []
            field['name'] = fld.name
            field['alias'] = fld.aliasName
            field['type'] = fld.type
            field['domain'] = fld.domain
            if fld.isNullable:
                props.append('nullable')
            else:
                props.append('notnullable')
            indexes = dsc.indexes
            if indexes:
                # Check every index for this field, not just the first one.
                if any(fld.name in [f.name for f in index.fields]
                       for index in indexes):
                    props.append('indexed')
                else:
                    props.append('notindexed')
            field['properties'] = props
            schema_fields.append(field)
        schema['fields'] = schema_fields

        # Add an entry for the table and its schema.
        schema['rows'] = int(arcpy.GetCount_management(data_path).getOutput(0))
        table_entry['id'] = '{0}_{1}'.format(job.location_id, dsc.name)
        table_entry['location'] = job.location_id
        table_entry['action'] = job.action_type
        table_entry['format_type'] = 'Schema'
        table_entry['entry'] = {
            'fields': {
                '_discoveryID': job.discovery_id,
                'name': dsc.name,
                'path': dsc.catalogPath,
                'format': 'schema'
            }
        }
        table_entry['entry']['fields']['schema'] = schema

        if job.schema_only:
            job.send_entry(table_entry)
            return table_entry
        else:
            job.send_entry(table_entry)

        if dsc.dataType == 'Table':
            # Get join information.
            table_join = job.get_join(dsc.name)
            if table_join:
                table_view = arcpy.MakeTableView_management(
                    dsc.catalogPath, 'view')
                arcpy.AddJoin_management(
                    table_view, table_join['field'],
                    os.path.join(job.path, table_join['table']),
                    table_join['field'], 'KEEP_COMMON')
            else:
                table_view = dsc.catalogPath

            # Get any query or constraint.
            query = job.get_table_query(dsc.name)
            constraint = job.get_table_constraint(dsc.name)
            if query and constraint:
                expression = """{0} AND {1}""".format(query, constraint)
            else:
                if query:
                    expression = query
                else:
                    expression = constraint

            field_types = job.search_fields(table_view)
            fields = field_types.keys()
            row_count = float(
                arcpy.GetCount_management(table_view).getOutput(0))
            if row_count == 0.0:
                return

            with arcpy.da.SearchCursor(table_view, fields, expression) as rows:
                mapped_fields = job.map_fields(dsc.name, fields, field_types)
                new_fields = job.new_fields
                ordered_fields = OrderedDict()
                for f in mapped_fields:
                    ordered_fields[f] = None
                increment = job.get_increment(row_count)
                for i, row in enumerate(rows, 1):
                    try:
                        if job.domains:
                            row = update_row(dsc.fields, rows, list(row))
                        mapped_fields = dict(zip(ordered_fields.keys(), row))
                        mapped_fields['_discoveryID'] = job.discovery_id
                        mapped_fields['meta_table_name'] = dsc.name
                        if hasattr(dsc, 'aliasName') and dsc.aliasName:
                            mapped_fields[
                                'meta_table_alias_name'] = dsc.aliasName
                        else:
                            mapped_fields['meta_table_alias_name'] = dsc.name
                        mapped_fields['format_category'] = 'GIS'
                        mapped_fields['format_type'] = "Record"
                        mapped_fields[
                            'format'] = "application/vnd.esri.{0}.record".format(
                                dsc.dataType.lower())
                        for nf in new_fields:
                            if nf['name'] == '*' or nf['name'] == dsc.name:
                                for k, v in nf['new_fields'].iteritems():
                                    mapped_fields[k] = v
                        oid_field = filter(
                            lambda x: x in ('FID', 'OID', 'OBJECTID'),
                            rows.fields)
                        if oid_field:
                            fld_index = rows.fields.index(oid_field[0])
                        else:
                            fld_index = i
                        if global_id_field:
                            mapped_fields['meta_{0}'.format(
                                global_id_field)] = mapped_fields.pop(
                                    'fi_{0}'.format(global_id_field))
                        entry['id'] = '{0}_{1}_{2}'.format(
                            job.location_id, os.path.basename(data_path),
                            row[fld_index])
                        entry['location'] = job.location_id
                        entry['action'] = job.action_type
                        entry['relation'] = 'contains'
                        entry['entry'] = {'fields': mapped_fields}
                        entry['entry']['links'] = [{
                            'relation': 'database',
                            'id': table_entry['id']
                        }]
                        job.send_entry(entry)
                        if (i % increment) == 0:
                            status_writer.send_percent(
                                i / row_count,
                                "{0} {1:%}".format(dsc.name, i / row_count),
                                'esri_worker')
                    except (AttributeError, RuntimeError):
                        continue
        else:
            generalize_value = job.generalize_value
            sr = arcpy.SpatialReference(4326)
            geo['spatialReference'] = dsc.spatialReference.name
            geo['code'] = dsc.spatialReference.factoryCode

            # Get join information.
            table_join = job.get_join(dsc.name)
            if table_join:
                lyr = arcpy.MakeFeatureLayer_management(dsc.catalogPath, 'lyr')
                arcpy.AddJoin_management(
                    lyr, table_join['input_join_field'],
                    os.path.join(job.path, table_join['table']),
                    table_join['output_join_field'], 'KEEP_COMMON')
            else:
                lyr = dsc.catalogPath

            field_types = job.search_fields(lyr)
            fields = field_types.keys()
            query = job.get_table_query(dsc.name)
            constraint = job.get_table_constraint(dsc.name)
            if query and constraint:
                expression = """{0} AND {1}""".format(query, constraint)
            else:
                if query:
                    expression = query
                else:
                    expression = constraint
            if dsc.shapeFieldName in fields:
                fields.remove(dsc.shapeFieldName)
                field_types.pop(dsc.shapeFieldName)
            elif table_join:
                fields.remove(arcpy.Describe(lyr).shapeFieldName)
                field_types.pop(arcpy.Describe(lyr).shapeFieldName)
            row_count = float(arcpy.GetCount_management(lyr).getOutput(0))
            if row_count == 0.0:
                return
            if dsc.shapeType == 'Point':
                with arcpy.da.SearchCursor(lyr, ['SHAPE@'] + fields,
                                           expression, sr) as rows:
                    mapped_fields = job.map_fields(dsc.name,
                                                   list(rows.fields[1:]),
                                                   field_types)
                    new_fields = job.new_fields
                    ordered_fields = OrderedDict()
                    for f in mapped_fields:
                        ordered_fields[f] = None
                    increment = job.get_increment(row_count)
                    for i, row in enumerate(rows):
                        try:
                            if job.domains:
                                row = update_row(dsc.fields, rows, list(row))
                            if row[0]:
                                geo['lon'] = row[0].firstPoint.X
                                geo['lat'] = row[0].firstPoint.Y
                            mapped_fields = dict(
                                zip(ordered_fields.keys(), row[1:]))
                            mapped_fields['_discoveryID'] = job.discovery_id
                            mapped_fields['meta_table_name'] = dsc.name
                            if hasattr(dsc, 'aliasName') and dsc.aliasName:
                                mapped_fields[
                                    'meta_table_alias_name'] = dsc.aliasName
                            else:
                                mapped_fields[
                                    'meta_table_alias_name'] = dsc.name
                            mapped_fields['format_category'] = 'GIS'
                            mapped_fields['geometry_type'] = 'Point'
                            mapped_fields['format_type'] = 'Feature'
                            mapped_fields[
                                'format'] = "application/vnd.esri.{0}.feature".format(
                                    dsc.dataType.lower())
                            for nf in new_fields:
                                if nf['name'] == '*' or nf['name'] == dsc.name:
                                    for k, v in nf['new_fields'].iteritems():
                                        mapped_fields[k] = v
                            if global_id_field:
                                mapped_fields['meta_{0}'.format(
                                    global_id_field)] = mapped_fields.pop(
                                        'fi_{0}'.format(global_id_field))
                            entry['id'] = '{0}_{1}_{2}'.format(
                                job.location_id, os.path.basename(data_path),
                                i)
                            entry['location'] = job.location_id
                            entry['action'] = job.action_type
                            entry['relation'] = 'contains'
                            entry['entry'] = {
                                'geo': geo,
                                'fields': mapped_fields
                            }
                            entry['entry']['links'] = [{
                                'relation': 'database',
                                'id': table_entry['id']
                            }]
                            job.send_entry(entry)
                            if (i % increment) == 0:
                                status_writer.send_percent(
                                    i / row_count,
                                    "{0} {1:%}".format(dsc.name,
                                                       i / row_count),
                                    'esri_worker')
                        except (AttributeError, RuntimeError):
                            continue
            else:
                with arcpy.da.SearchCursor(lyr, ['SHAPE@'] + fields,
                                           expression, sr) as rows:
                    increment = job.get_increment(row_count)
                    mapped_fields = job.map_fields(dsc.name,
                                                   list(rows.fields[1:]),
                                                   field_types)
                    new_fields = job.new_fields
                    ordered_fields = OrderedDict()
                    for f in mapped_fields:
                        ordered_fields[f] = None
                    for i, row in enumerate(rows):
                        try:
                            if job.domains:
                                row = update_row(dsc.fields, rows, list(row))
                            if row[0]:
                                if generalize_value == 0 or generalize_value == 0.0:
                                    geo['wkt'] = row[0].WKT
                                else:
                                    if geometry_ops:
                                        geo['wkt'] = geometry_ops.generalize_geometry(
                                            row[0].WKT, generalize_value)
                                    else:
                                        geo['xmin'] = row[0].extent.XMin
                                        geo['xmax'] = row[0].extent.XMax
                                        geo['ymin'] = row[0].extent.YMin
                                        geo['ymax'] = row[0].extent.YMax
                            mapped_fields = dict(
                                zip(ordered_fields.keys(), row[1:]))
                            mapped_fields['_discoveryID'] = job.discovery_id
                            mapped_fields['meta_table_name'] = dsc.name
                            if hasattr(dsc, 'aliasName') and dsc.aliasName:
                                mapped_fields[
                                    'meta_table_alias_name'] = dsc.aliasName
                            else:
                                mapped_fields[
                                    'meta_table_alias_name'] = dsc.name
                            for nf in new_fields:
                                if nf['name'] == '*' or nf['name'] == dsc.name:
                                    for k, v in nf['new_fields'].iteritems():
                                        mapped_fields[k] = v
                            if global_id_field:
                                mapped_fields['meta_{0}'.format(
                                    global_id_field)] = mapped_fields.pop(
                                        'fi_{0}'.format(global_id_field))
                            mapped_fields['geometry_type'] = dsc.shapeType
                            mapped_fields['format_category'] = 'GIS'
                            mapped_fields['format_type'] = 'Feature'
                            mapped_fields[
                                'format'] = "application/vnd.esri.{0}.feature".format(
                                    dsc.dataType.lower())
                            entry['id'] = '{0}_{1}_{2}'.format(
                                job.location_id,
                                os.path.splitext(
                                    os.path.basename(data_path))[0], i)
                            entry['location'] = job.location_id
                            entry['action'] = job.action_type
                            entry['entry'] = {
                                'geo': geo,
                                'fields': mapped_fields
                            }
                            entry['entry']['links'] = [{
                                'relation': 'database',
                                'id': table_entry['id']
                            }]
                            job.send_entry(entry)
                            if (i % increment) == 0:
                                status_writer.send_percent(
                                    i / row_count,
                                    "{0} {1:%}".format(dsc.name,
                                                       i / row_count),
                                    'esri_worker')
                        except (AttributeError, RuntimeError):
                            continue

        return table_entry
Code Example #6
            cursor.updateRow(row)
    arcpy.Append_management(project_dissolved, temp_projects_fc, 'NO_TEST')

# -----------------------------------------------------------------------------
#  Merge updated projects with unaltered projects.
# -----------------------------------------------------------------------------
# Copy features and coding of unaltered projects in MHN.
unaltered_projects_query = ''' "{0}" NOT IN ('{1}') '''.format(
    common_id_field, "','".join(project_arcs.keys()))

unaltered_projects_lyr = 'unaltered_projects_lyr'
arcpy.MakeFeatureLayer_management(MHN.hwyproj, unaltered_projects_lyr,
                                  unaltered_projects_query)

unaltered_coding_view = 'unaltered_coding_view'
arcpy.MakeTableView_management(MHN.route_systems[MHN.hwyproj][0],
                               unaltered_coding_view, unaltered_projects_query)

# Append features/coding from temp FC/table.
updated_projects_fc = os.path.join(MHN.mem, 'updated_projects_fc')
arcpy.Merge_management((unaltered_projects_lyr, temp_projects_fc),
                       updated_projects_fc)

updated_coding_table = os.path.join(MHN.mem, 'updated_coding_table')
arcpy.Merge_management((unaltered_coding_view, temp_coding_table),
                       updated_coding_table)

# -----------------------------------------------------------------------------
#  Commit the changes only after everything else has run successfully.
# -----------------------------------------------------------------------------
backup_gdb = MHN.gdb[:-4] + '_' + MHN.timestamp() + '.gdb'
arcpy.Copy_management(MHN.gdb, backup_gdb)
Code Example #7
    def execute(self, parameters, messages):
        """The source code of the tool."""

        # local variables and env
        arcpy.CreateFileGDB_management("E:/gina/poker/gdb",
                                       parameters[0].valueAsText)
        arcpy.env.workspace = "E:/gina/poker/gdb/" + parameters[
            0].valueAsText + ".gdb"
        arcpy.env.overwriteOutput = True
        adnr_lo_shp = "E:/gina/poker/shp/wip/land_ownership_data/adnr_gls_dls_merge_20170823_v1.shp"
        pfrr_popn_places = "E:/gina/poker/shp/wip/popn_places_data/pokerflat_popn_places_gcs_wgs84_to_akalbers_2.shp"
        afs_known_sites = "E:/gina/poker/shp/afs_data/afs_known_sites_20180629_3338.shp"
        pipTable = "E:/gina/poker/dbf/predicted_impact_xy.dbf"
        pip_point_shp = "E:/gina/poker/pip/pip_point.shp"
        pip_point_3338 = "E:/gina/poker/pip/pip_point_3338.shp"
        pip_buffer_shp = "E:/gina/poker/pip/pip_buffer.shp"
        pip_range_rings_shp = "E:/gina/poker/pip/pip_range_rings.shp"
        pip_lo_in_buffer_shp = "E:/gina/poker/pip/pip_lo_in_buffer.shp"
        pip_lo_in_buf_sum_dbf = "E:/gina/poker/pip/pip_lo_in_buf_sum.dbf"
        pip_lo_in_buf_sum_csv = "E:/gina/poker/pip/pip_lo_in_buf_sum.csv"
        pip_popn_places_in_buffer_shp = "E:/gina/poker/pip/pip_popn_places_in_buffer.shp"
        pip_known_sites_in_buffer_shp = "E:/gina/poker/pip/pip_known_sites_in_buffer.shp"
        x = parameters[2].valueAsText
        y = parameters[3].valueAsText
        r = parameters[6].valueAsText + " NauticalMiles"
        rr1 = (float(parameters[6].valueAsText)) / 3
        rr2 = (rr1 * 2)
        rrs = str(rr1) + ";" + str(rr2) + ";" + r.split(" ")[0]
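        # Note (added): MultipleRingBuffer_analysis also accepts a Python list of
        # distances; a hypothetical, clearer equivalent of the string above:
        #     rrs = [rr1, rr2, float(r.split(" ")[0])]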
        pipLayer = "pipLayer1"
        srs = arcpy.SpatialReference("Alaska Albers Equal Area Conic")
        intersect_fc1 = [adnr_lo_shp, pip_buffer_shp]
        intersect_fc2 = [pfrr_popn_places, pip_buffer_shp]
        intersect_fc3 = [afs_known_sites, pip_buffer_shp]
        mxd = arcpy.mapping.MapDocument("current")
        dataframe = arcpy.mapping.ListDataFrames(mxd)[0]
        sourceLoSymbologyLayer = arcpy.mapping.Layer(
            "E:/gina/poker/lyr/lo2.lyr")
        sourcePipSymbologyLayer = arcpy.mapping.Layer(
            "E:/gina/poker/lyr/pip2.lyr")
        sourceRrsSymbologyLayer = arcpy.mapping.Layer(
            "E:/gina/poker/lyr/rrs.lyr")
        sourcePopSymbologyLayer = arcpy.mapping.Layer(
            "E:/gina/poker/lyr/pop.lyr")
        sourceAfsSymbologyLayer = arcpy.mapping.Layer(
            "E:/gina/poker/lyr/afs2.lyr")

        # Process: Calculate Lon Field
        arcpy.CalculateField_management(pipTable, "Lon", x, "PYTHON", "")

        # Process: Calculate Lat Field
        arcpy.CalculateField_management(pipTable, "Lat", y, "PYTHON", "")

        # Process: Make XY Event Layer
        arcpy.MakeXYEventLayer_management(
            pipTable, "Lon", "Lat", pipLayer,
            "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]];-400 -400 1000000000;-100000 10000;-100000 10000;8.98315284119522E-09;0.001;0.001;IsHighPrecision",
            "")

        # Process: Copy Features
        arcpy.CopyFeatures_management(pipLayer, pip_point_shp, "", "0", "0",
                                      "0")

        # Process: Project pip point
        arcpy.Project_management(pip_point_shp, pip_point_3338, srs)

        # Process: Buffer pip point
        arcpy.Buffer_analysis(pip_point_3338, pip_buffer_shp, r, "FULL",
                              "ROUND", "NONE", "", "PLANAR")

        # Process: Multiple Ring Buffer
        arcpy.MultipleRingBuffer_analysis(pip_point_3338, pip_range_rings_shp,
                                          rrs, "NauticalMiles", "", "NONE",
                                          "FULL")

        # Process: Intersect pip buffer with land ownership
        arcpy.Intersect_analysis(intersect_fc1, pip_lo_in_buffer_shp, "ALL",
                                 "", "INPUT")

        # Process: Intersect pip buffer with popn places
        arcpy.Intersect_analysis(intersect_fc2, pip_popn_places_in_buffer_shp,
                                 "ALL", "", "INPUT")

        # Process: Intersect pip buffer with afs known sites
        arcpy.Intersect_analysis(intersect_fc3, pip_known_sites_in_buffer_shp,
                                 "ALL", "", "INPUT")

        # Process: Make feature layers and add to the map
        ## pip feature class list
        fclist = arcpy.ListFeatureClasses()

        ## pip layer
        arcpy.MakeFeatureLayer_management(pip_point_3338,
                                          "Predicted Impact Point")

        ## land ownership layer
        arcpy.MakeFeatureLayer_management(
            pip_lo_in_buffer_shp,
            "Land Ownership within 3sigma of Predicted Impact Point")

        ## Range Rings
        arcpy.MakeFeatureLayer_management(pip_range_rings_shp, "Range Rings")

        ## populated places layer
        popn_places_records = int(
            arcpy.GetCount_management(pip_popn_places_in_buffer_shp).getOutput(
                0))
        if popn_places_records > 0:
            arcpy.MakeFeatureLayer_management(
                pip_popn_places_in_buffer_shp,
                "Populated Places within 3sigma of Predicted Impact Point")
            addPipPopnPlacesLayer = arcpy.mapping.Layer(
                "Populated Places within 3sigma of Predicted Impact Point")
            arcpy.mapping.AddLayer(dataframe, addPipPopnPlacesLayer)

        ## known sites layer
        known_sites_records = int(
            arcpy.GetCount_management(pip_known_sites_in_buffer_shp).getOutput(
                0))
        if known_sites_records > 0:
            arcpy.MakeFeatureLayer_management(
                pip_known_sites_in_buffer_shp,
                "AFS Known Sites within 3sigma of Predicted Impact Point")
            addPipKnownSitesLayer = arcpy.mapping.Layer(
                "AFS Known Sites within 3sigma of Predicted Impact Point")
            arcpy.mapping.AddLayer(dataframe, addPipKnownSitesLayer)

        addPipPointLayer = arcpy.mapping.Layer("Predicted Impact Point")
        arcpy.mapping.AddLayer(dataframe, addPipPointLayer)

        add3sigmaLoLayer = arcpy.mapping.Layer(
            "Land Ownership within 3sigma of Predicted Impact Point")
        arcpy.mapping.AddLayer(dataframe, add3sigmaLoLayer)

        addRangeRings = arcpy.mapping.Layer("Range Rings")
        arcpy.mapping.AddLayer(dataframe, addRangeRings)

        # Add and calc Acres field for intersected Land Ownership
        arcpy.AddField_management(pip_lo_in_buffer_shp, "Acres", "DOUBLE")
        arcpy.CalculateField_management(pip_lo_in_buffer_shp, "Acres",
                                        "!shape.area@acres!", "PYTHON_9.3", "")

        # Summarize intersected Land Ownership by Owner and total Acres
        arcpy.Statistics_analysis(pip_lo_in_buffer_shp, pip_lo_in_buf_sum_dbf,
                                  "Acres SUM", "OWNER")
        arcpy.MakeTableView_management(pip_lo_in_buf_sum_dbf,
                                       "pip_lo_in_buf_sum_view")  # out view name is required
        add3sigmaLoSumTbl = arcpy.mapping.TableView(pip_lo_in_buf_sum_dbf)
        arcpy.mapping.AddTableView(dataframe, add3sigmaLoSumTbl)

        # Symbolize and Refresh
        lo_layer = arcpy.mapping.ListLayers(
            mxd, "*Land Ownership within 3sigma of Predicted Impact Point*",
            dataframe)[0]
        arcpy.mapping.UpdateLayer(dataframe, lo_layer, sourceLoSymbologyLayer,
                                  True)
        lo_layer.symbology.addAllValues()

        pip_layer = arcpy.mapping.ListLayers(mxd, "*Predicted Impact Point*",
                                             dataframe)[0]
        arcpy.mapping.UpdateLayer(dataframe, pip_layer,
                                  sourcePipSymbologyLayer, True)

        rr_layer = arcpy.mapping.ListLayers(mxd, "*Range Rings*", dataframe)[0]
        arcpy.mapping.UpdateLayer(dataframe, rr_layer, sourceRrsSymbologyLayer,
                                  True)

        pop_layer = arcpy.mapping.ListLayers(mxd, "*Populated Places*",
                                             dataframe)[0]
        arcpy.mapping.UpdateLayer(dataframe, pop_layer,
                                  sourcePopSymbologyLayer, True)

        afs_layer = arcpy.mapping.ListLayers(mxd, "*Known Sites*",
                                             dataframe)[0]
        arcpy.mapping.UpdateLayer(dataframe, afs_layer,
                                  sourceAfsSymbologyLayer, True)

        arcpy.RefreshTOC()
        arcpy.RefreshActiveView()

        # Populate Mission GDB
        mission_layers = [
            pip_point_3338, pip_lo_in_buffer_shp,
            pip_popn_places_in_buffer_shp, pip_range_rings_shp,
            pip_known_sites_in_buffer_shp
        ]
        arcpy.FeatureClassToGeodatabase_conversion(mission_layers,
                                                   arcpy.env.workspace)

        return
Code Example #8
def SppInAOI(AOIShp, hucShp, workDir, origin, season, reproduction,
                 presence):
    '''
    (string, string, string, list, list, list, list) -> list
    
    Returns a list of species occurring within the provided polygon.  Runtime
    is about 3-5 minutes.
    
    Arguments:
    AOIShp -- A shapefile polygon (dissolved) to investigate.  Should have 
        the same coordinate systems as the huc shapefile.
    hucShp -- A 12 digit huc shapefile that matches the GAP species database hucs.
    workDir -- Where to work and save output.
    origin -- Origin codes to include.
    season -- Season codes to include.
    reproduction -- Reproduction codes to include.
    presence -- Presence codes to include.
    
    Example:
    >>> sppList = SppInAOI(AOIShp = "T:/Temp/BlueMountains2.shp",
                               hucShp = config.hucs,
                               workDir = "T:/Temp/",
                               origin = [1],
                               season = [1, 3, 4],
                               reproduction = [1, 2, 3],
                               presence = [1, 2, 3])
    '''    
    import arcpy
    arcpy.ResetEnvironments()
    arcpy.env.overwriteOutput=True
    arcpy.env.workspace = workDir
    import pandas as pd
    
    ##############################################  Get list of hucs within polygon
    ###############################################################################
    print("\nSelecting HUCs that intersect with the AOI shapefile\n")
    arcpy.management.MakeFeatureLayer(hucShp, 'HUCs_lyr')
    arcpy.management.MakeFeatureLayer(AOIShp, 'shp_lyr')
    arcpy.management.SelectLayerByLocation('HUCs_lyr', 'INTERSECT', 'shp_lyr')
    
    # Make an empty list to append
    selHUCsList = []
    # Get the fields from the input selected HUCs layer
    fields = arcpy.ListFields('HUCs_lyr')
    # Create a fieldinfo object
    fieldinfo = arcpy.FieldInfo()
    # Use only the HUC12RNG field and set it to fieldinfo
    for field in fields:
        if field.name == "HUC12RNG":
            fieldinfo.addField(field.name, field.name, "VISIBLE", "")
    # The selected HUCs layer will have fields as set in fieldinfo object
    arcpy.MakeTableView_management("HUCs_lyr", "selHUCsTV", "", "", fieldinfo)
    # Loop through the selected HUCs and add them to a list
    for row in sorted(arcpy.da.SearchCursor('selHUCsTV', ['HUC12RNG'])):
        selHUCsList.append(row[0])
    # Make the selected HUCs list a set for comparing with species range HUCs
    selHUCsSet = set(selHUCsList)
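    # Hypothetical shortcut (not in the original): the FieldInfo/TableView steps
    # above can be replaced by reading the field straight from the selected layer,
    # since a da.SearchCursor on a layer honors its selection:
    #     selHUCsSet = {row[0] for row in arcpy.da.SearchCursor('HUCs_lyr', ['HUC12RNG'])}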
    
    #################################################  Get a species list to assess
    ###############################################################################  
    print("Comparing species ranges to selected HUCs\n")
    ## Make WHRdb and Species database connections
    whrCursor, whrConn = gapdb.ConnectWHR()
    sppCursor, sppConn = gapdb.ConnectSppDB()
    
    # Build an SQL statement that returns CONUS
    # full species codes and names that are in the modeled list
    sql = """SELECT t.strUC, t.strCommonName, t.strScientificName,
                    t.strsubSciNameText, t.ysnInclude, intRegionCode               
                    FROM dbo.tblAllSpecies as t
                    WHERE (t.ysnInclude = 'True') AND t.intRegionCode < 7"""
    
    # Pull into a dataframe
    dfAllSpp = pd.read_sql(sql, whrConn)
    # Drop the region code and include fields
    dfAllSpp = dfAllSpp.drop(['intRegionCode','ysnInclude'], axis=1)
    # Drop duplicates to get unique species codes
    dfUnique = dfAllSpp.drop_duplicates(subset='strUC', keep='first')
    
    ###############################  Assess each species' occurrence in polygon hucs
    ###############################################################################  
    # List to collect species in AOI
    masterList = []
    for SC in list(dfUnique.strUC):
        taxa = dictionaries.taxaDict[SC[0]]
        
        # What hucs are species' in?
        sql = """SELECT t.strHUC12RNG, t.strUC, t.intGapOrigin, t.intGapPres, 
                    t.intGapRepro, t.intGapSeas 
                    FROM dbo.tblRanges_""" + taxa + """ as t
                    WHERE (t.strUC = '""" + str(SC) + """') 
                    AND t.strHUC12RNG < '190000000000'"""
        dfRngHUCs = pd.read_sql(sql, sppConn)
        
        # Which hucs have acceptable attributes?
        select={'intGapPres':presence, 'intGapSeas':season, 
                'intGapOrigin':origin, 'intGapRepro':reproduction}
        dfS1 = dfRngHUCs[dfRngHUCs[list(select)].isin(select).all(axis=1)]
        
        # Get the strHUC12RNG column into a set
        SpeciesSet = set(dfS1[dfS1.columns[0]].tolist())
        
        # Compare the species and AOI huc sets to see if there's any overlap.
        if len(selHUCsSet & SpeciesSet) > 0:
            masterList.append(SC)
        else:
            pass 
    
    # Delete cursors and close db connections before returning
    sppConn.close()
    whrConn.close()
    del sppCursor, sppConn
    del whrCursor, whrConn

    if len(masterList) == 0:
        print("!!!!  There was some sort of problem  !!!!\n")
    else:
        return masterList
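The dictionary-keyed `isin` filter used above is a handy pandas idiom for applying several per-column membership tests at once; a minimal, self-contained sketch using the same field names:

import pandas as pd

df = pd.DataFrame({'intGapPres': [1, 2, 7], 'intGapSeas': [1, 4, 1]})
select = {'intGapPres': [1, 2, 3], 'intGapSeas': [1, 3, 4]}
# Keep only rows where every keyed column holds an allowed value
filtered = df[df[list(select)].isin(select).all(axis=1)]
print(filtered)  # the row with intGapPres == 7 is dropped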
コード例 #9
0
output_cell_size = arcpy.env.cellSize
arcpy.env.extent = elevation_raster
arcpy.env.overwriteOutput = True
arcpy.env.parallelProcessingFactor = "75%"
#arcpy.Delete_management("in_memory")

#extract elevations to stations
arcpy.AddMessage("Extracting elevations")
fcStations_wElevation = scratchGDB + "/stations_wElevation"
arcpy.gp.ExtractValuesToPoints_sa(station_locations, elevation_raster,
                                  fcStations_wElevation, "NONE", "VALUE_ONLY")

#Calculate vapor pressure averages over the 3 hour time period (ignoring "no-data" values: -999 etc.)
#and join to stations_wElevation
arcpy.AddMessage("Calculating average dewpoint temperatures")
arcpy.MakeTableView_management(data_table, "table1",
                               "dewpoint_temperature > -500")
arcpy.Statistics_analysis("table1", "in_memory/tableAverage",
                          "dewpoint_temperature MEAN", "site_key")
arcpy.JoinField_management(fcStations_wElevation, "Site_Key",
                           "in_memory/tableAverage", "site_key",
                           "MEAN_dewpoint_temperature")

#Delete rows from stations_wElevation that have negative or null elevations
arcpy.AddMessage(
    "Removing \"no-data\" rows from station locations (eg. negative elevations)"
)
cursor = arcpy.UpdateCursor(fcStations_wElevation)
for row in cursor:
    if row.getValue("RASTERVALU") is None or row.getValue("RASTERVALU") < 0:
        cursor.deleteRow(row)
del cursor
コード例 #10
0
    arcpy.MakeFeatureLayer_management(featureClass, layerViewName)

    adsGDB = os.path.dirname(featureClass)
    arcpy.env.workspace = adsGDB
    for DCAValue in DCAValues:
        individualDCATables = [
            os.path.join(adsGDB, table)
            for table in arcpy.ListTables('*{}*'.format(DCAValue))
            if 'Copy' not in table
        ]

        for individualTable in individualDCATables:
            year = individualTable[-4:]
            table = '{}_View'.format(os.path.basename(individualTable))
            arcpy.AddMessage('Working on ' + table)
            arcpy.MakeTableView_management(individualTable, table)
            selectTableName = '{}_Copy'.format(
                os.path.basename(individualTable))

            selectTablePath = os.path.join(scratchGDB, selectTableName)
            arcpy.TableSelect_analysis(table, selectTablePath, '1=1')

            arcpy.MakeTableView_management(selectTablePath, selectTableName)

            #mergedTableName = '{}_{}'.format(region, table.replace(
            #    'Expanded', '').replace('_View', ''))
            mergedTableName = 'ADS_Expanded_{}_{}_Copy_Merged'.format(
                DCAValue, year)

            arcpy.TableToTable_conversion(selectTableName, outPutGDBPath,
                                          mergedTableName)
コード例 #11
0
def GetEndemics(extentShapefile, shpHucs, workDir, keyword):
    """
    (string, string, string, string) -> string & saved csv file.
                         
        Use this to create a CSV file of species' (including subspecies)
    whose ranges are endemic to a specified input AOI shapefile.
    Generally, the AOI shapefile should be a single polygon. The script
    uses a select by location function in which 12-digit HUCs are
    selected that are completely within the AOI shapefile. If there
    is more than one polygon, the selections will be made within each
    individual polygon - i.e. there will by multiple selections as
    opposed to one continuous set of HUCs.
    The shapefile must have projection and coordinate system that 
    matches the 12-digit HUC shapefile from which species' ranges are
    derived.
    
    The final CSV file will contain the following fields:
    Species Code
    Scientific Name
    Common Name
    
    NOTE: Be careful with this function, finding endemics may be more 
    difficult than it seems. This obviously does not take into account 
    species' ranges outside CONUS since GAP ranges are not complete outside
    the lower 48 (with some AK, HI, PR exceptions). And, obviously again, this
    does not take into consideration ranges in other countries during
    different seasons. It would be possible to alter this script to
    look for seasonal endemism. As currently written, the sql query
    to get HUC range data includes all seasons and known, possibly,
    and potentially present occurrence status.  Also, bear in mind that you
    may need to take extra caution regarding endemic species that are 
    distributed up to the edges of oceans.
    
    Arguments:
    extentShapefile -- A designated AOI shapefile with projection and coordinate
                system to match the 12-digit HUC range shapefile.
    shpHucs -- A 12-digit HUC range shapefile.
    workDir -- Where to save the csv file (KeywordEndemicSpecies.txt)
    keyword -- Keyword to use in output file name, whatever you want that to be.
    
    Example:
    >>> csvPath = GetEndemics(extentShapefile="T:/Project/ProjectExtent.shp",
                                           workDir='T:/Project/',
                                           shpHUCs="T:/hucs.shp",
                                           keyword="ThisProject")
    """
    import arcpy
    import pandas as pd
    from datetime import datetime
    starttime = datetime.now()
    
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    #            ++++ Directory & File Locations ++++
    arcpy.env.workspace = workDir

    # ***************************************************************
    ''' ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        Select HUCs of the CONUS HUC shapefile that are completely within the
        user defined source layer feature shapefile. Each must be made into a
        layer prior to using SelectLayerByLocation
    '''
    print "\nSelecting HUCs completely within the designated shapefile ....\n"
    
    arcpy.MakeFeatureLayer_management(shpHucs, 'HUCs_lyr')
    arcpy.MakeFeatureLayer_management(extentShapefile, 'shp_lyr')
    arcpy.SelectLayerByLocation_management('HUCs_lyr', 'COMPLETELY_WITHIN', 'shp_lyr')
    
    # Make an empty list to append
    selHUCsList = []
    # Get the fields from the input selected HUCs layer
    fields = arcpy.ListFields('HUCs_lyr')
    
    # Create a fieldinfo object
    fieldinfo = arcpy.FieldInfo()
    
    # Use only the HUC12RNG field and set it to fieldinfo
    for field in fields:
        if field.name == "HUC12RNG":
            fieldinfo.addField(field.name, field.name, "VISIBLE", "")
    
    # The selected HUCs layer will have fields as set in fieldinfo object
    arcpy.MakeTableView_management("HUCs_lyr", "selHUCsTV", "", "", fieldinfo)
    
    # Loop through the selected HUCs and add them to a list
    for row in sorted(arcpy.da.SearchCursor('selHUCsTV', ['HUC12RNG'])):
        selHUCsList.append(row[0])
    # Make the selected HUCs list a set for comparing with species range HUCs
    selHUCsSet = set(selHUCsList)
    
        
    ''' ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        Get HUC range data from the Species Database
    '''
    print "\n++++++++++++++ Comparing species ranges to selected HUCs +++++++++++++++++\n"
    
    # Make an empty master dataframe
    dfMaster = pd.DataFrame()
    
    ## Make WHRdb and Species database connections
    whrCursor, whrConn = gapdb.ConnectWHR()
    sppCursor, sppConn = gapdb.ConnectSppDB()
    
    # Build an SQL statement that returns CONUS
    # full species codes and names that are in the modeled list
    sql = """SELECT t.strUC, t.strCommonName, t.strScientificName,
                    t.strsubSciNameText, t.ysnInclude, intRegionCode               
                    FROM dbo.tblAllSpecies as t
                    WHERE (t.ysnInclude = 'True') AND t.intRegionCode < 7"""
    
    # Pull into a dataframe
    dfAllSpp = pd.read_sql(sql, whrConn)
    # Drop the region code and include fields
    dfAllSpp = dfAllSpp.drop(['intRegionCode','ysnInclude'], axis=1)
    # Drop duplicates to get unique species codes
    dfUnique = dfAllSpp.drop_duplicates(subset='strUC', keep='first')
        
    
    ''' Loop over the unique species list to calculate each
        one's range size and percentage
    '''
    # Set up an iterator to get row index for dfUSpp dataframe
    # First, sort and reset the row index in dfUnique dataframe
    dfSort = dfUnique.sort_values(by='strUC')
    dfUSpp = dfSort.reset_index(drop=True)
    i = -1
    for spp in dfUSpp['strUC']:
        
        print "Working on " + spp + " ...."
        
        # Add one to the iterator
        i += 1
        # Now, get the scientific name, subspecies name,
        # common name, and species code based on row index
        SN = dfUSpp['strScientificName'][i]
        SSN = dfUSpp['strsubSciNameText'][i]
        CN = dfUSpp['strCommonName'][i]
        SC = dfUSpp['strUC'][i]
        
        # Get the taxon from the species code
        if spp[0] == 'a':
            taxa = 'Amphibians'
        elif spp[0] == 'b':
            taxa = 'Birds'
        elif spp[0] == 'm':
            taxa = 'Mammals'
        else:
            taxa = 'Reptiles'
            
        # Build an SQL statement that returns relevant fields in the
        # appropriate taxa table tblRanges_<taxa> using a species code
        # Limit the HUC codes to only CONUS - i.e. < 190000000000    
        
        sql = """SELECT t.strHUC12RNG, t.strUC, t.intGapOrigin, t.intGapPres, 
                    t.intGapRepro, t.intGapSeas 
                    FROM dbo.tblRanges_""" + taxa + """ as t
                    WHERE (t.strUC = '""" + str(spp) + """') 
                    AND t.strHUC12RNG < '190000000000'"""
        
        dfRngHUCs = pd.read_sql(sql, sppConn)
        
        # Select only known, possibly, or potentially present;
        #             year-round, winter, or summer seasons
        select={'intGapPres':[1,2,3], 'intGapSeas':[1,3,4]}
        dfS1 = dfRngHUCs[dfRngHUCs[list(select)].isin(select).all(axis=1)]
        # Get the strHUC12RNG column into a set
        dfS1Set = set(dfS1[dfS1.columns[0]].tolist())
        
        # Subtract this species' range HUC set from the shapefile's HUC set
        # to see if the set is empty => all range HUCs for the species would
        # then be entirely within the shapefile's interior HUCs
        if len(dfS1Set - selHUCsSet) == 0:
            print SN, "range is endemic to the input shapefile\n"
            # Add the species' info to a dataframe
            dfMaster = dfMaster.append({'Species Code':SC, 
                                        'Scientific Name':SN,
                                        'subspecies Name':SSN,
                                        'Common Name':CN}, ignore_index=True)
        else:
            print "Range not endemic to AOI. Moving on to next species...\n"
    
    
    # Delete cursors and close db connections (do this before returning,
    # otherwise the cleanup below is unreachable whenever endemics are found)
    sppConn.close()
    whrConn.close()
    del sppCursor, sppConn
    del whrCursor, whrConn
    del dfAllSpp, dfUnique, dfSort, dfUSpp

    endtime = datetime.now()
    delta = endtime - starttime
    print("+" * 35)
    print("Processing time: " + str(delta))
    print("+" * 35)
    print("!!!  BE SURE TO READ THE NOTES IN THE DOCUMENTATION  !!!")

    # Check to see if there are any species with their range entirely
    # within the designated shapefile. If not print message to the screen
    if len(dfMaster) == 0:
        print(" ========= No species have endemic range within the AOI =========\n")
    else:
        # Reorder columns in completed dataframe
        dfMaster = dfMaster[['Species Code', 'Scientific Name', 'Common Name']]
        # Export to text file
        outFileName = workDir + keyword + "EndemicSpeciesList.txt"
        dfMaster.to_csv(outFileName)
        return outFileName
        if numPPLrows != numMQrows:
            print('ERROR: Wrong number of rows exported for link FC; {} versus {}'.format(numMQrows, numPPLrows))
            errorLogic = 1
        else:
            print('Correct number of rows exported for link FC.')
        # Dissolve on ID
        print('Dissolving on ID...')
        dfield = tblPropLink + "_" + propIDfield
        sfield = tblPropLink + "_" + parIDfield
        statsfield = [[sfield,"COUNT"]]
        arcpy.Dissolve_management(fcPrclPLink,fcDsslvIDPath,dfield,statsfield)
        # Join the TP_Property table
        print('Preparing to join property table...')
        # Create temporary layer/view
        arcpy.MakeFeatureLayer_management(fcDsslvIDPath,"dsslvlyr")
        arcpy.MakeTableView_management(tblPropPath,"proptblview")
        # Make join
        arcpy.AddJoin_management("dsslvlyr",dfield,"proptblview",propIDfield,"KEEP_ALL")
        print('\tProperty table joined.')
        print('Copying features...')
        # Output
        arcpy.CopyFeatures_management("dsslvlyr",fcCnnctPPath)
        
        endtime = datetime.datetime.now()
        elapsedtime = endtime - starttime
        print('DONE.  Time taken... {} H:MM:SS.dddddd'.format(elapsedtime))

        ### Note that Technical Specialist should be advised for copy to pre-prod
        print('***NOTE: next step/s = advise Technical Specialist that data is ready for copy to pre-prod')

except:
コード例 #13
0
 arcpy.MakeTableView_management(York_Edit_GIS_Land_Base_CAMA, GIS_Land_Base_CAMA_View, "", "",\
  "OBJECTID OBJECTID VISIBLE NONE;\
  PIDN PIDN VISIBLE NONE;\
  DISTRICT DISTRICT VISIBLE NONE;\
  BLOCK BLOCK VISIBLE NONE;MAP MAP VISIBLE NONE;\
  PARCEL PARCEL VISIBLE NONE;\
  PARCEL_MAJOR PARCEL_MAJOR VISIBLE NONE;\
  PARCEL_MINOR PARCEL_MINOR VISIBLE NONE;\
  LEASEHD LEASEHD VISIBLE NONE;\
  PIDN_LEASE PIDN_LEASE VISIBLE NONE;\
  CARD_NO CARD_NO VISIBLE NONE;\
  MASTER MASTER VISIBLE NONE;\
  DEED_BK DEED_BK VISIBLE NONE;\
  DEED_PG DEED_PG VISIBLE NONE;\
  SITE_ST_NO SITE_ST_NO VISIBLE NONE;\
  SITE_ST_DIR SITE_ST_DIR VISIBLE NONE;\
  SITE_ST_NAME SITE_ST_NAME VISIBLE NONE;\
  SITE_ST_SUF SITE_ST_SUF VISIBLE NONE;\
  PROPADR PROPADR VISIBLE NONE;\
  OWNER_FULL OWNER_FULL VISIBLE NONE;\
  OWN_NAME1 OWN_NAME1 VISIBLE NONE;\
  OWN_NAME2 OWN_NAME2 VISIBLE NONE;\
  MAIL_ADDR_FULL MAIL_ADDR_FULL VISIBLE NONE;\
  MAIL_ADDR1 MAIL_ADDR1 VISIBLE NONE;\
  MAIL_ADDR2 MAIL_ADDR2 VISIBLE NONE;\
  MAIL_ADDR3 MAIL_ADDR3 VISIBLE NONE;\
  PREV_OWNER PREV_OWNER VISIBLE NONE;\
  CLASS CLASS VISIBLE NONE;\
  LUC LUC VISIBLE NONE;\
  ACRES ACRES VISIBLE NONE;\
  STYLE STYLE VISIBLE NONE;\
  NUM_STORIE NUM_STORIE VISIBLE NONE;\
  RES_LIVING_AREA RES_LIVING_AREA VISIBLE NONE;\
  YRBLT YRBLT VISIBLE NONE;\
  CLEAN_GREEN CLEAN_GREEN VISIBLE NONE;\
  HEATSYS HEATSYS VISIBLE NONE;\
  FUEL FUEL VISIBLE NONE;\
  UTILITY UTILITY VISIBLE NONE;\
  APRLAND APRLAND VISIBLE NONE;\
  APRBLDG APRBLDG VISIBLE NONE;\
  APRTOTAL APRTOTAL VISIBLE NONE;\
  SALEDT SALEDT VISIBLE NONE;\
  PRICE PRICE VISIBLE NONE;\
  PREV_PRICE PREV_PRICE VISIBLE NONE;\
  SCHOOL_DIS SCHOOL_DIS VISIBLE NONE;\
  COMM_STRUC COMM_STRUC VISIBLE NONE;\
  COMM_YEAR_BUILT COMM_YEAR_BUILT VISIBLE NONE;\
  COMM_BUILDING_SQ_FT COMM_BUILDING_SQ_FT VISIBLE NONE;\
  GRADE GRADE VISIBLE NONE;\
  CDU CDU VISIBLE NONE;\
  GlobalID GlobalID VISIBLE NONE;\
  HYPERLINK HYPERLINK VISIBLE NONE"                                          )
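A field-info string this long is easier to generate than to hand-type; a sketch that builds the same "VISIBLE NONE" specification from the table's field list, assuming every field should remain visible:

import arcpy

fields = arcpy.ListFields(York_Edit_GIS_Land_Base_CAMA)
# "<name> <name> VISIBLE NONE" per field, semicolon-delimited
field_info = ";".join("{0} {0} VISIBLE NONE".format(f.name) for f in fields)
arcpy.MakeTableView_management(York_Edit_GIS_Land_Base_CAMA,
                               GIS_Land_Base_CAMA_View, "", "", field_info)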
コード例 #14
0
ファイル: dlaStage.py プロジェクト: xiaoai-li/data-assistant
def stage(xmlFileNames):
    global source, target, rowLimit

    dla.setWorkspace()
    dla._errCount = 0
    outlayers = []

    for xmlFileName in xmlFileNames.split(';'):
        xmlFileName = dla.getXmlDocName(xmlFileName)
        xmlDoc = dla.getXmlDoc(xmlFileName)
        prj = dla.setProject(xmlFileName, dla.getNodeValue(xmlDoc, "Project"))
        if prj is None:
            dla.addError(
                "Unable to open your project, please ensure it is in the same folder as your current project or your Config file"
            )

        if rowLimit == "" or rowLimit == None:
            rowLimit = None
        if source == "" or source == None:
            source = dla.getDatasetPath(xmlDoc, "Source")
        if target == "" or target == None:
            target = dla.getDatasetPath(xmlDoc, "Target")

        if dla.isTable(source) or dla.isTable(target):
            datasetType = 'Table'
        else:
            datasetType = 'FeatureClass'

        targetName = dla.getStagingName(source, target)
        targetDS = os.path.join(dla.workspace, targetName)

        res = dlaExtractLayerToGDB.extract(xmlFileName, rowLimit,
                                           dla.workspace, source, targetDS,
                                           datasetType)
        if res == True:
            res = dlaFieldCalculator.calculate(xmlFileName, dla.workspace,
                                               targetName, False)

            if res == True:
                arcpy.env.addOutputsToMap = True
                layer = targetName
                layertmp = targetName + "tmp"
                if arcpy.Exists(layertmp):
                    arcpy.Delete_management(layertmp)
                if dla.isTable(targetDS):
                    arcpy.MakeTableView_management(targetDS, layertmp)
                else:
                    arcpy.MakeFeatureLayer_management(targetDS, layertmp)
                fieldInfo = dla.getLayerVisibility(layertmp, xmlFileName)
                if dla.isTable(targetDS):
                    arcpy.MakeTableView_management(targetDS, layer, None,
                                                   dla.workspace, fieldInfo)
                else:
                    arcpy.MakeFeatureLayer_management(targetDS, layer, None,
                                                      dla.workspace, fieldInfo)
                # should make only the target fields visible
                outlayers.append(layer)
                ### *** need to insert tag in xml file...
                dla.insertStagingElement(xmlDoc)
                try:
                    xmlDoc.writexml(open(xmlFileName, 'wt', encoding='utf-8'))
                    dla.addMessage('Staging element written to config file')
                except:
                    dla.addMessage("Unable to write data to xml file")
                xmlDoc.unlink()
        else:
            dla.addError("Failed to Extract data")
            print("Failed to Extract data")
    if outlayers != []:
        arcpy.SetParameter(_derived, ";".join(outlayers))
    dla.writeFinalMessage("Data Assistant - Stage")
コード例 #15
0
ファイル: veti.py プロジェクト: zhangwt-per/GeoProject
def vtei(land_in_region, cellarea, regionid, regionname, land, codes):
    landview = 'landview'
    arcpy.MakeTableView_management(land_in_region, landview)

    # Convert the table to a NumPy array
    arr = arcpy.da.TableToNumPyArray(landview, '*')
    arcpy.Delete_management(landview)

    # Total land area
    TOT = float(arr['Count'].sum() * cellarea)

    # 1. Cropland area
    AGR = float(sum_area_by_codes(arr, codes['AGR'], cellarea))

    # Cropland share (%)
    AGR_P = AGR / TOT * 100

    # 2. Forest area
    FRT = float(sum_area_by_codes(arr, codes['FRT'], cellarea))

    # Forest share (%)
    FRT_P = FRT / TOT * 100

    # 3. Grassland area
    GRS = float(sum_area_by_codes(arr, codes['GRS'], cellarea))

    # Grassland share (%)
    GRS_P = GRS / TOT * 100

    # 4. Water body and wetland area
    WAT = float(sum_area_by_codes(arr, codes['WAT'], cellarea))

    # Water/wetland share (%)
    WAT_P = WAT / TOT * 100

    # 5. Built-up land area
    BUL = float(sum_area_by_codes(arr, codes['BUL'], cellarea))

    # Built-up share (%)
    BUL_P = BUL / TOT * 100

    # 6. Unused land area
    UUS = float(sum_area_by_codes(arr, codes['UUS'], cellarea))

    # Unused-land share (%)
    UUS_P = UUS / TOT * 100

    # Habitat quality index
    EQI = (0.35 * FRT_P + 0.21 * GRS_P + 0.28 * WAT_P + 0.11 * AGR_P +
           0.04 * BUL_P + 0.01 * UUS_P)

    # Vegetation coverage index
    VCI = (0.38 * FRT_P + 0.34 * GRS_P + 0.19 * AGR_P + 0.07 * BUL_P +
           0.02 * UUS_P)

    # Water network density index
    WDI = WAT_P

    # Human disturbance index
    HDI = (0.90 * BUL_P + 0.10 * AGR_P)

    # Eco-environmental quality index
    VTEI = (0.30 * EQI + 0.25 * VCI + 0.25 * WDI + 0.20 * (100 - HDI))
    veti_data_list = [
        regionname, TOT, AGR, AGR_P, FRT, FRT_P, GRS, GRS_P, WAT, WAT_P, BUL,
        BUL_P, UUS, UUS_P, EQI, VCI, WDI, HDI, VTEI
    ]
    return veti_data_list
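The helper sum_area_by_codes is not shown in this snippet; a plausible minimal sketch, assuming the array comes from TableToNumPyArray as above and that the land-use code field is named 'Value':

import numpy as np

def sum_area_by_codes(arr, code_list, cellarea):
    # Sum cell counts over rows whose code is in code_list, then scale to area
    # (field names 'Value' and 'Count' are assumptions)
    mask = np.isin(arr['Value'], code_list)
    return arr['Count'][mask].sum() * cellarea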
コード例 #16
0
            insertRows.insertRow(searchRow)
        del searchRow, searchRows
    del insertRows
    arcpy.SetLogHistory(True)

before_count = int(arcpy.GetCount_management(t).getOutput(0))
check_fields = [f.name for f in arcpy.ListFields(t) if f.name != 'OBJECTID']
arcpy.DeleteIdentical_management(ALL_XREF_TABLE, check_fields)
after_count = int(arcpy.GetCount_management(t).getOutput(0))
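# Report the effect of DeleteIdentical (assumes t above refers to ALL_XREF_TABLE)
print('DeleteIdentical removed {} duplicate rows'.format(before_count - after_count))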
arcpy.AddIndex_management(ALL_XREF_TABLE, "NewReachCode", "IDX_NewReachCode")
arcpy.AddIndex_management(ALL_XREF_TABLE, "OldReachCode", "IDX_OldReachCode")

# Step 5: Select only NHDReachCrossReference rows that have a corresponding lake (the rest are stream reaches, etc.)
# with a join. For some reason, Join Field is way too slow and others have noted that. Use Add Join instead.
# Also field mappings are too annoying so copy and then delete fields instead
arcpy.MakeTableView_management(ALL_XREF_TABLE, 'xref_lyr')
arcpy.MakeTableView_management(ALL_LAKES_FC, 'lakes_lyr')
keep_fields = [f.name for f in arcpy.ListFields(ALL_XREF_TABLE)]
underscore_perm_id_field = '{}_Permanent_Identifier'.format(
    os.path.splitext(os.path.basename(ALL_LAKES_FC))[0])
keep_fields.append(underscore_perm_id_field)
arcpy.AddJoin_management('xref_lyr', 'NewReachCode', 'lakes_lyr', 'ReachCode')
print([f.name for f in arcpy.ListFields('xref_lyr')])

# Copy table, with selection
dot_perm_id_field = '{}.Permanent_Identifier'.format(
    os.path.splitext(os.path.basename(ALL_LAKES_FC))[0])
arcpy.TableToTable_conversion('xref_lyr', os.path.dirname(ALL_XREF_TABLE),
                              os.path.basename(LAKES_XREF_TABLE),
                              '{} is not null'.format(dot_perm_id_field))  # dot-qualified name assumed; joined_perm_id_field was never defined
arcpy.RemoveJoin_management('xref_lyr')
コード例 #17
0
output_cell_size = arcpy.env.cellSize
arcpy.env.extent = elevation_raster
arcpy.env.overwriteOutput = True
arcpy.env.parallelProcessingFactor = "75%"
#arcpy.Delete_management("in_memory")

#extract elevations to stations
arcpy.AddMessage("Extracting elevations")
fcStations_wElevation = scratchGDB + "/stations_wElevation"
arcpy.gp.ExtractValuesToPoints_sa(station_locations, elevation_raster,
                                  fcStations_wElevation, "NONE", "VALUE_ONLY")

#Calculate precipitation averages over the 3 hour time period (ignoring "no-data" values: -999 etc.)
#and join to stations_wElevation
arcpy.AddMessage("Calculating average precipitation")
arcpy.MakeTableView_management(data_table, "table1", "ppts > -500")
arcpy.Statistics_analysis("table1", "in_memory/tableAverage", "ppts MEAN",
                          "site_key")
arcpy.JoinField_management(fcStations_wElevation, "Site_Key",
                           "in_memory/tableAverage", "site_key", "MEAN_ppts")

#Delete rows from stations_wElevation that have negative or null elevations
arcpy.AddMessage(
    "Removing \"no-data\" rows from station locations (eg. negative elevations)"
)
cursor = arcpy.UpdateCursor(fcStations_wElevation)
for row in cursor:
    if row.getValue("RASTERVALU") is None or row.getValue("RASTERVALU") < 0:
        cursor.deleteRow(row)
del cursor
del row
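The classic UpdateCursor above works, but the arcpy.da cursors are considerably faster; an equivalent sketch of the same no-data cleanup (da's deleteRow() takes no argument):

with arcpy.da.UpdateCursor(fcStations_wElevation, ["RASTERVALU"]) as cursor:
    for row in cursor:
        if row[0] is None or row[0] < 0:
            cursor.deleteRow()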
コード例 #18
0
def Get_Prev_Tracking_Num(parcel_def):
    arcpy.AddMessage('Getting Previous Captured TR#...')
    #---------------------------------------------------------------------------
    # Create a layer view of the 'Projects' in a FGDB
    lossFootprints = habitrak_loss_footprints_FGDB + '\\' + habitrak_loss_footprints_name
    arcpy.MakeFeatureLayer_management(lossFootprints, 'lossFootprints')

    # Create a table view of the 'Loss' table
    arcpy.MakeTableView_management(habitrak_loss_table, 'lossTable')

    #---------------------------------------------------------------------------
    # Now Join the Loss table to the Projects Feature Layer
    in_layer_or_view = 'lossFootprints'
    in_field = 'PROJECTID'
    join_table = 'lossTable'
    join_field = 'PROJECTID'
    join_type = 'KEEP_ALL'
    arcpy.AddJoin_management(in_layer_or_view, in_field, join_table,
                             join_field, join_type)

    #---------------------------------------------------------------------------
    # make feature layer of parcels
    parcels_path = SDE_source_for_ATLANTIC + '\\SDE.SANGIS.PARCELS_ALL'
    arcpy.MakeFeatureLayer_management(parcels_path, 'parcels_layer')

    # Select the parcel
    in_layer_or_view = 'parcels_layer'
    selection_type = 'NEW_SELECTION'
    where_clause = "APN_8 =" + parcel_def

    arcpy.SelectLayerByAttribute_management(in_layer_or_view, selection_type,
                                            where_clause)

    #---------------------------------------------------------------------------
    # If not parcels selected, we don't want to continue
    result = arcpy.GetCount_management(in_layer_or_view)
    count = int(result.getOutput(0))

    if count == 0:
        tracking_num = 'Parcel not selected.  Tracking Number couldn\'t be determined.'
        return tracking_num
    #---------------------------------------------------------------------------

    # Select the project the parcel intersects
    arcpy.AddMessage('Selecting the project the parcel intersects')
    in_layer_or_view = 'lossFootprints'
    overlap_type = 'INTERSECT'
    select_features = 'parcels_layer'
    search_distance = ''
    selection_type = 'NEW_SELECTION'

    arcpy.SelectLayerByLocation_management(in_layer_or_view, overlap_type,
                                           select_features, search_distance,
                                           selection_type)

    #---------------------------------------------------------------------------
    # If no project is selected, we don't want to continue
    result = arcpy.GetCount_management(in_layer_or_view)
    count = int(result.getOutput(0))
    if count == 0:
        tracking_num = 'No project selected.  Tracking Number couldn\'t be determined.'
        return tracking_num
    #---------------------------------------------------------------------------

    # Get the TR # of the selected project
    field = 'Loss.TRACKNO'
    cursor = arcpy.SearchCursor('lossFootprints')
    row = cursor.next()
    projects = []

    while row:
        project = row.getValue(field)
        projects.append(project)
        row = cursor.next()

    # Make string of the project(s)
    # If there are more than one project, make a string readable and return it
    # to PerformAllTestParcels
    tracking_num = ' and '.join(projects)

    return tracking_num
コード例 #19
0
    arcpy.MakeFeatureLayer_management(OLD_Parcels, OldParcels_Layer,
                                      whereClause, "", fieldInfo)

    # Process: 1st Dissolve
    arcpy.Dissolve_management(OldParcels_Layer, OldParcel_Neighborhoods_shp,
                              "Neighborhoods", "LRSN COUNT", "MULTI_PART",
                              "DISSOLVE_LINES")
    ScriptUtils.AddMsgAndPrint(
        "\tCreated {0}...".format(OldParcel_Neighborhoods_shp), 0)

    ScriptUtils.AddMsgAndPrint("\tCompiling new Parcel data...", 0)

    # Process: Make Table View of pva.remf_master
    where = "PVA_NEIGHBOR >= 100000"
    # Create a fieldinfo object
    arcpy.MakeTableView_management(pva_remf_master, "tmpremf_master")
    fields = arcpy.ListFields("tmpremf_master")
    fldInfo = arcpy.FieldInfo()
    lstFields = ["PVA_NEIGHBOR", "PARCELID", "LRSN"]
    # Iterate through the fields and set them to fieldinfo
    for field in fields:
        name = field.name
        if name in lstFields:
            if name == "PVA_NEIGHBOR":
                fldInfo.addField(name, "Neighborhoods", "VISIBLE", "")
            else:
                fldInfo.addField(name, name, "VISIBLE", "")
        else:
            fldInfo.addField(name, name, "HIDDEN", "")
    arcpy.MakeTableView_management(pva_remf_master, remf_View, where, "",
                                   fldInfo)
コード例 #20
0
def Report_MODULE(PARAMS):
    """Report Generation"""
    start = time.clock()  # start the clock
    message("Generating report...")
    # Report_PARAMS = [outTbl, siteName, mxd, pdf]

    outTbl = PARAMS[0]
    siteNameFld = str(PARAMS[1])
    mxd = arcpy.mapping.MapDocument(PARAMS[2])
    # Set file name, ext, and remove file if it already exists
    pdf = PARAMS[3]
    if os.path.splitext(pdf)[1] == "":
        pdf += ".pdf"
    if os.path.exists(pdf):
        os.remove(pdf)
    # Set path for intermediate pdfs
    pdf_path = os.path.dirname(pdf) + os.sep

    # Create the file and append pages in the cursor loop
    pdfDoc = arcpy.mapping.PDFDocumentCreate(pdf)

    graphic = "GRAPHIC_ELEMENT"
    blackbox = arcpy.mapping.ListLayoutElements(mxd, graphic, "blackbox")[0]
    graybox = arcpy.mapping.ListLayoutElements(mxd, graphic, "graybox")[0]

    # dictionary for field, type, ltorgt, numDigits, allnos, & average
    fld_dct = {
        'field': [
            'FR_2_cnt', 'FR_3A_acr', 'FR_3A_boo', 'FR_3B_boo', 'FR_3B_sca',
            'FR_3D_boo', 'V_2_50', 'V_2_100', 'V_2_score', 'V_2_boo',
            'V_3A_boo', 'V_3B_scar', 'V_3C_comp', 'V_3D_boo', 'EE_2_cnt',
            'EE_3A_boo', 'EE_3B_sca', 'EE_3C_boo', 'EE_3D_boo', 'R_2_03',
            'R_2_03_tb', 'R_2_03_bb', 'R_2_05', 'R_2_6', 'R_3A_acr',
            'R_3B_sc06', 'R_3B_sc1', 'R_3B_sc12', 'R_3C_boo', 'R_3D_boo',
            'B_2_cnt', 'B_2_boo', 'B_3A_boo', 'B_3C_boo', 'B_3D_boo',
            'Vul_High', 'Conserved'
        ]
    }
    txt, dbl = 'Text', 'Double'
    fld_dct['type'] = [
        dbl, dbl, txt, txt, dbl, txt, dbl, dbl, dbl, txt, txt, dbl, dbl, txt,
        dbl, txt, dbl, txt, txt, dbl, txt, txt, dbl, dbl, dbl, dbl, dbl, dbl,
        txt, txt, dbl, txt, txt, txt, txt, dbl, dbl
    ]
    fld_dct['ltorgt'] = [
        'gt', 'gt', '', '', 'lt', '', 'gt', 'gt', 'gt', '', '', 'lt', 'gt', '',
        'gt', '', 'lt', '', '', 'gt', '', '', 'gt', 'gt', 'gt', 'lt', 'lt',
        'lt', '', '', 'gt', '', '', '', '', 'gt', 'gt'
    ]
    fld_dct['aveBool'] = [
        '', '', 'YES', 'NO', '', 'YES', '', '', '', 'YES', 'YES', '', '',
        'YES', '', 'YES', '', 'YES', 'YES', '', 'YES', 'YES', '', '', '', '',
        '', '', 'YES', 'YES', '', 'YES', 'YES', 'YES', 'YES', '', ''
    ]
    fld_dct['numDigits'] = [
        0, 2, 0, 0, 2, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
        0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 2, 2
    ]
    fld_dct['rowNum'] = [
        1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
        22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39
    ]
    fld_dct['allnos'] = [''] * 37
    fld_dct['average'] = [''] * 37

    # Make table layer from results table
    arcpy.MakeTableView_management(outTbl, "rptbview")
    desc = arcpy.Describe("rptbview")
    fieldInfo = desc.fieldInfo
    cnt_rows = str(arcpy.GetCount_management(outTbl))

    for field in fld_dct['field']:  # loop through fields
        idx = fld_dct['field'].index(field)
        # Check to see if field exists in results
        fldIndex = fieldInfo.findFieldByName(fld_dct['field'][idx])
        if fldIndex > 0:  # exists
            if fld_dct['type'][idx] == 'Text':  # narrow to yes/no
                # Copy text field to list by field index
                fld_dct[idx] = field_to_lst(outTbl, field)
                # Check if all 'NO'
                if fld_dct[idx].count("NO") == int(cnt_rows):
                    fld_dct['allnos'][idx] = 1
            else:  # type = Double
                l = [x for x in field_to_lst(outTbl, field) if x is not None]
                if l != []:  # if not all null
                    # Get average values
                    fld_dct['average'][idx] = mean(l)

    start = exec_time(start, "loading data for report")

    i = 1
    pg_cnt = 1
    siterows = arcpy.SearchCursor(outTbl, "")  # may be slow, use "rptbview"?
    siterow = siterows.next()

    while siterow:

        oddeven = i % 2
        if oddeven == 1:
            column = 1
            siteText = "SiteLeftText"
            site_Name = "SiteLeftName"
        else:
            column = 2
            siteText = "SiteRightText"
            site_Name = "SiteRightName"
        TE = "TEXT_ELEMENT"
        siteText = arcpy.mapping.ListLayoutElements(mxd, TE, siteText)[0]
        siteText.text = "Site " + str(i)

        # Text element processing
        siteName = arcpy.mapping.ListLayoutElements(mxd, TE, site_Name)[0]
        fldNameValue = "siterow." + siteNameFld
        if fieldInfo.findFieldByName(siteNameFld) > 0:
            if eval(fldNameValue) == ' ':
                siteName.text = "No name"
            else:
                siteName.text = eval(fldNameValue)
        else:
            siteName.text = "No name"

        # loop through expected fields in fld_dct['field']
        for field in fld_dct['field']:
            idx = fld_dct['field'].index(field)
            # Check to see if field exists in results
            # if it doesn't color = black
            if fldExists(field, column, fld_dct['rowNum'][idx], fieldInfo,
                         blackbox):
                fldVal = "siterow." + field
                if fld_dct['type'][idx] == 'Double':  # is numeric
                    proctext(eval(fldVal), "Num", fld_dct['numDigits'][idx],
                             fld_dct['ltorgt'][idx], fld_dct['average'][idx],
                             column, fld_dct['rowNum'][idx],
                             fld_dct['allnos'][idx], mxd)
                else:  # is boolean
                    proctext(eval(fldVal), "Boolean", 0, "",
                             fld_dct['aveBool'][idx], column,
                             fld_dct['rowNum'][idx], fld_dct['allnos'][idx],
                             mxd)
        if oddeven == 0:
            exportReport(pdfDoc, pdf_path, pg_cnt, mxd)
            start = exec_time(start, "Page " + str(pg_cnt) + " generation")
            pg_cnt += 1

        i += 1
        siterow = siterows.next()

    # If you finish a layer with an odd number of records,
    # last record was not added to the pdf.
    if oddeven == 1:
        # Blank out right side
        siteText = arcpy.mapping.ListLayoutElements(mxd, "TEXT_ELEMENT",
                                                    "SiteRightText")[0]
        siteText.text = " "
        # Fill right side with gray empty boxes
        for i in range(39):
            # Not set up to process the Social Equity or Reliability scores
            newBox = graybox.clone("_clone")
            boxpos(newBox, 2, i + 1)
        exportReport(pdfDoc, pdf_path, pg_cnt, mxd)

    del siterow
    del siterows

    arcpy.Delete_management("rptbview", "")

    pdfDoc.saveAndClose()

    mxd_result = os.path.splitext(pdf)[0] + ".mxd"
    if arcpy.Exists(mxd_result):
        arcpy.Delete_management(mxd_result)

    mxd.saveACopy(mxd_result)  # save last page just in case

    del mxd
    del pdfDoc
    mxd_name = os.path.basename(mxd_result)
    message("Created PDF Report: {} and {}".format(pdf, mxd_name))
コード例 #21
0
    # Get nearby parcel IDs
    nearby_parcels = []
    with arcpy.da.SearchCursor(parcel_layer, tid_field) as parcel_cursor:
        nearby_parcels = ["\'%s\'" %(r[0]) for r in parcel_cursor]

    # Table definition query
    if len(nearby_parcels) > 1:
        table_tid_string = ", ".join(nearby_parcels)
    elif len(nearby_parcels) == 1:
        table_tid_string = nearby_parcels[0]
    else:
        table_tid_string = ""
    table_where = "%s IN (%s)" %(table_tid_field, table_tid_string)

    # Make table view with subsetted entries
    arcpy.MakeTableView_management(assessor_table, table_view, table_where)

    # ========= Write out to csv ===========
    arcpy.AddMessage("Creating CSV...")

    # Create CSV of records from new feature class
    csv_file = os.path.join(arcpy.env.scratchFolder, "Addresses.csv")
    with open(csv_file, 'w') as csvfile:
        csvfile.write("sep=|\n")
        writer = csv.writer(csvfile, delimiter='|', lineterminator='\n')
        with arcpy.da.SearchCursor(table_view, field_names=address_fields) as cursor:
            writer.writerow(address_fields)
            for row in cursor:
                writer.writerow(row)

    # Sends path of the csv file back to the service handler
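The hand-back step itself is cut off here; in a geoprocessing service it is typically done through an output parameter, sketched below (parameter index 1 is an assumption):

arcpy.SetParameterAsText(1, csv_file)  # index depends on the tool's parameter order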
コード例 #22
0
                                 SearchDistance, "NO_LOCATION", "NO_ANGLE",
                                 "ALL")
# alter the name of field: in the previous table "IN_FID" stands for segmentsFC1, "NEAR_FID"-->segmentsFC2
arcpy.AlterField_management(NearTable, "IN_FID", "SegFC1_ID")
arcpy.AlterField_management(NearTable, "NEAR_FID", "SegFC2_ID")
# reduce the near table to just the non-touching features -- NearDist
arcpy.TableSelect_analysis(NearTable, NearDist, "NEAR_DIST > 0")

# add fields for from feature angle, to feature angle
arcpy.AddField_management(NearDist, "FromAngle", "DOUBLE")
arcpy.AddField_management(NearDist, "ToAngle", "DOUBLE")
arcpy.AddField_management(NearDist, "AngleDiff", "DOUBLE")

# create a join to copy the angles to the fromAngle and toAngle fields
arcpy.AddMessage("Copying angles")
arcpy.MakeTableView_management(NearDist, "ND")
arcpy.AddJoin_management("ND", "SegFC1_ID", SegmentsFC1, "OBJECTID")
arcpy.CalculateField_management("ND", "NearDist.FromAngle",
                                "!Segments1.Angle!", "PYTHON")
arcpy.RemoveJoin_management("ND")

arcpy.AddJoin_management("ND", "SegFC2_ID", SegmentsFC2, "OBJECTID")
arcpy.CalculateField_management("ND", "NearDist.ToAngle", "!Segments2.Angle!",
                                "PYTHON")
arcpy.RemoveJoin_management("ND")

# calculate the difference in angle
arcpy.AddMessage("Resolving differences of angles")
arcpy.CalculateField_management(NearDist, "AngleDiff",
                                "abs(!FromAngle! - !ToAngle!)", "PYTHON")
# flip the AngleDiff if it is a larger angle
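The flip step itself is truncated; a sketch of the usual normalization, assuming AngleDiff is in degrees, so the difference is always the smaller of the two angles between headings:

with arcpy.da.UpdateCursor(NearDist, ["AngleDiff"]) as cursor:
    for row in cursor:
        if row[0] > 180:
            row[0] = 360 - row[0]
            cursor.updateRow(row)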
コード例 #23
0
if len(unrecognizedTypes) == 0:
    addMsgAndPrint('    none')
else:
    for t in unrecognizedTypes:
        if t is not None:
            addMsgAndPrint('    ' + t)
        else:
            addMsgAndPrint('    missing type value')
addMsgAndPrint('  ')

if setPolys:
    if arcpy.Exists(dmu) and arcpy.Exists(mup):
        addMsgAndPrint('  setting Symbol and Label values in MapUnitPolys')
        mupTable = 'mupTable'
        testAndDelete(mupTable)
        arcpy.MakeTableView_management(mup, mupTable)
        # check to see if join already exists
        joinAdded = True
        fields = arcpy.ListFields(mupTable)
        for f in fields:
            if f.name.find('DescriptionOfMapUnits.Symbol') > -1:
                joinAdded = False
        # else add join
        if joinAdded:
            arcpy.AddJoin_management(mupTable, 'MapUnit', dmu, 'MapUnit')

        # get field names for Symbol, Label
        mupSymbol = os.path.basename(mup) + '.Symbol'
        mupLabel = os.path.basename(mup) + '.Label'
        # calculate Symbol
        arcpy.CalculateField_management(mupTable, mupSymbol,
コード例 #24
0
        Landuses.append( ( LU_future_file, LU_future_fld, "F" ) )
    if not Landuses:
        hp.log("   \nThere are no parameters selected to calculate production for!!")
        raise Exception("There are no parameters selected to calculate production for")
    
    for LU in Landuses:
        hp.log("Join LUT to landuse layer")
        arcpy.MakeFeatureLayer_management(LU[0], "LULyr")
        
        # Rewrite the comma-delimited LUT as tab-delimited (open() instead of the Python 2-only file())
        lut_in = open(os.path.join(hp.AppPath, r"..\Tooldata\LUT.csv"), 'r')
        lut_out = open(os.path.join(hp.SWorkspace, "LUT.txt"), 'w')
        lut_out.write(lut_in.read().replace(",", "\t"))
        lut_in.close()
        lut_out.close()
        
        arcpy.MakeTableView_management(os.path.join(hp.SWorkspace, "LUT.txt"), "LUTview")
        arcpy.AddJoin_management("LULyr" , LU[1], "LUTview", "TABLE_MATC")

        
        hp.log("Create Export Coefficient (washoff rate) rasters")
        for param in params:
            pn = param[:10].strip()
            hp.log( '  Parameter: ' + param)
            arcpy.PolygonToRaster_conversion("LULyr", param, os.path.join(hp.SWorkspace,"temp"), "MAXIMUM_AREA", param, hp.units['size'])
            lu2temp = Raster(os.path.join(hp.SWorkspace,"temp")) * float(hp.units['cellsqft']/43560)
            hp.saveRasterOutput(lu2temp, LU[2] + pn) ######################
    
    hp.log("Create roughness grid")
    arcpy.PolygonToRaster_conversion("LULyr", 'MANNINGSN', os.path.join(hp.SWorkspace,"MANNINGSN"), "MAXIMUM_AREA", 'MANNINGSN', hp.units['size'])
    
    hp.log("Calculate overland flow velocity")
コード例 #25
0
def adjacentMapUnits(inFds, outFds, outHtml, validateCafTopology, planCaf):
    # get CAF and MUP
    inCaf = getCaf(inFds)
    inMup = inCaf.replace('ContactsAndFaults', 'MapUnitPolys')
    if not arcpy.Exists(inMup):
        addMsgAndPrint('Cannot find MapUnitPolys feature class ' + inMup)
        raise arcpy.ExecuteError
    if not validateCafTopology:
        testAndDelete(planCaf)
        arcpy.FeatureToLine_management(inCaf, planCaf)
    # IDENTITY planCAF with MUP to make idCaf
    idCaf = outFds + '/' + os.path.basename(inCaf) + '_MUPid'
    testAndDelete(idCaf)
    addMsgAndPrint('  IDENTITYing ' + planCaf + '\n    with ' + inMup +
                   ' to get adjoining polys')
    arcpy.Identity_analysis(planCaf, inMup, idCaf, 'ALL', '',
                            'KEEP_RELATIONSHIPS')
    # get ordered list of mapUnits from DMU
    addMsgAndPrint('  getting ordered list of map units from DMU table')
    sortedDMU = os.path.dirname(outFds) + '/sortedDMU'
    testAndDelete(sortedDMU)
    dmu = os.path.dirname(inFds) + '/DescriptionOfMapUnits'
    testAndDelete('dmuView')
    arcpy.MakeTableView_management(dmu, 'dmuView')
    arcpy.Sort_management('dmuView', sortedDMU,
                          [['HierarchyKey', 'ASCENDING']])
    dmuUnits = []
    with arcpy.da.SearchCursor(sortedDMU, ['MapUnit']) as cursor:
        for row in cursor:
            if row[0] is not None and row[0] != '':
                dmuUnits.append(row[0])
    if debug3: addMsgAndPrint('dmuUnits = ' + str(dmuUnits))
    testAndDelete(sortedDMU)
    # SearchCursor through idCaf
    addMsgAndPrint('  building dictionaries of line adjacencies')
    concealedLinesDict = {}
    faultLinesDict = {}
    contactLinesDict = {}
    internalContacts = []
    badConcealed = []
    fields = [
        'Type', 'IsConcealed', 'RIGHT_MapUnit', 'LEFT_MapUnit', 'Shape_Length',
        'OBJECTID'
    ]
    if debug3: addMsgAndPrint(str(numberOfRows(idCaf)) + ' rows in ' + idCaf)
    with arcpy.da.SearchCursor(idCaf, fields) as cursor:
        for row in cursor:
            if debug3: addMsgAndPrint(str(row))
            try:
                lr = row[3] + '|' + row[2]
            except:
                addMsgAndPrint(str(row))
                lr = '--|--'
            if row[1].upper() == 'Y':  # IsConcealed = Y
                if debug3: addMsgAndPrint('*** is concealed')
                addRowToDict(lr, row, concealedLinesDict)
                if row[3] != row[2]:
                    badConcealed.append(
                        [row[3], row[2], row[0], row[1], row[5], row[4]])
            elif isContact(row[0]):
                if debug3: addMsgAndPrint('*** is a contact')
                addRowToDict(lr, row, contactLinesDict)
                if row[3] == row[2]:
                    internalContacts.append(
                        [row[3], row[2], row[0], row[1], row[5], row[4]])
            elif isFault(row[0]):  # it's a fault
                addRowToDict(lr, row, faultLinesDict)
    outHtml.write('<h3>MapUnits adjacent to CAF lines</h3>\n')
    outHtml.write(
        'See feature class ' + os.path.basename(idCaf) +
        ' for ContactsAndFaults arcs attributed with adjacent polygon information. \n'
    )
    outHtml.write(
        '<i>In tables below, upper cell value is number of arcs. Lower cell value is cumulative arc length in map units.</i><br><br>\n'
    )
    writeLineAdjacencyTable('Concealed contacts and faults', outHtml,
                            concealedLinesDict, dmuUnits, 'badConcealed')
    outHtml.write('<br>\n')
    writeLineAdjacencyTable('Contacts (not concealed)', outHtml,
                            contactLinesDict, dmuUnits, 'internalContacts')
    outHtml.write('<br>\n')
    writeLineAdjacencyTable('Faults (not concealed)', outHtml, faultLinesDict,
                            dmuUnits, '')
    testAndDelete('dmuView')
    return badConcealed, internalContacts, idCaf
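addRowToDict is defined elsewhere in this toolbox; given how the dictionaries are consumed (arc counts and cumulative lengths per LEFT|RIGHT map-unit pair), a plausible sketch:

def addRowToDict(lr, row, aDict):
    # Accumulate [arc count, total Shape_Length] per 'LEFT|RIGHT' key
    # (row[4] is Shape_Length in the cursor's field order; the structure is an assumption)
    if lr not in aDict:
        aDict[lr] = [0, 0.0]
    aDict[lr][0] += 1
    aDict[lr][1] += row[4]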
コード例 #26
0
def route_data_mile(route, park, block):
    new_tbl = str(block)[:-4] + "_" + str(route)[:-4]
    arcpy.CopyRows_management(route, new_tbl)
    route_tbl = str(new_tbl) + "_tvw"
    arcpy.MakeTableView_management(new_tbl, route_tbl)

    # Export table with name then do additional fields per year or whatever
    arcpy.AddField_management(route_tbl, "GEOID10", "TEXT", "", "", 15,
                              "GEOID10")
    arcpy.AddField_management(route_tbl, "SITE", "TEXT", "", "", 75, "SITE")
    arcpy.AddField_management(route_tbl, "ACRES", "DOUBLE", "", "", "",
                              "ACRES")
    arcpy.AddField_management(route_tbl, "POP", "LONG", "", "", "", "POP")
    arcpy.AddField_management(route_tbl, "ACRE_PP", "DOUBLE", "", "", "",
                              "ACRE_PP")
    arcpy.AddField_management(route_tbl, "PARK_PP", "DOUBLE", "", "", "",
                              "PARK_PP")

    expression1 = "(!Name![0:15])"
    expression2 = "(!Name![18:])"
    expression3 = "(!SITE![:-6])"
    arcpy.CalculateField_management(route_tbl, "GEOID10", expression1,
                                    "PYTHON_9.3")
    arcpy.CalculateField_management(route_tbl, "SITE", expression2,
                                    "PYTHON_9.3")
    arcpy.CalculateField_management(route_tbl, "SITE", expression3,
                                    "PYTHON_9.3")

    arcpy.AddJoin_management(route_tbl, "SITE", park, "NAME")
    field_name_1 = str(park)[:-4]
    expression4 = "(" + "!" + field_name_1 + ".MAP_ACRES!" + ")"
    arcpy.CalculateField_management(route_tbl, "ACRES", expression4,
                                    "PYTHON_9.3")
    arcpy.RemoveJoin_management(route_tbl)

    arcpy.AddJoin_management(route_tbl, "GEOID10", block, "GEOID10")
    field_name_2 = str(block)[:-4]
    expression5 = "(" + "!" + field_name_2 + ".POP!" + ")"
    arcpy.CalculateField_management(route_tbl, "POP", expression5,
                                    "PYTHON_9.3")
    arcpy.RemoveJoin_management(route_tbl)

    # Deletes rows where GEOID10 AND SITE are duplicates
    arcpy.DeleteIdentical_management(route_tbl, ["GEOID10", "SITE"])

    # summarize SITE by ACRES & POP
    site_tbl = str(route_tbl) + "_stats"
    arcpy.Statistics_analysis(route_tbl, site_tbl,
                              [["ACRES", "MEAN"], ["POP", "SUM"]], "SITE")

    # calculate acres/person & site/person for each park
    arcpy.AddField_management(site_tbl, "ACRE_PP", "DOUBLE", "", "", "",
                              "ACRE_PP")
    arcpy.AddField_management(site_tbl, "PARK_PP", "DOUBLE", "", "", "",
                              "PARK_PP")
    expression6 = "(!MEAN_ACRES!/!SUM_POP!)"
    expression7 = "(1/!SUM_POP!)"
    arcpy.CalculateField_management(site_tbl, "ACRE_PP", expression6,
                                    "PYTHON_9.3")
    arcpy.CalculateField_management(site_tbl, "PARK_PP", expression7,
                                    "PYTHON_9.3")

    arcpy.AddJoin_management(route_tbl, "SITE", site_tbl, "SITE")
    expression8 = "(!" + site_tbl + ".ACRE_PP!)"
    expression9 = "(!" + site_tbl + ".PARK_PP!)"
    arcpy.CalculateField_management(route_tbl, "ACRE_PP", expression8,
                                    "PYTHON_9.3")
    arcpy.CalculateField_management(route_tbl, "PARK_PP", expression9,
                                    "PYTHON_9.3")
    arcpy.RemoveJoin_management(route_tbl)

    # Summarize route layer by GEOID
    geoid_tbl = str(route_tbl) + "_geoidStats"
    arcpy.Statistics_analysis(route_tbl, geoid_tbl,
                              [["ACRE_PP", "SUM"], ["PARK_PP", "SUM"]],
                              "GEOID10")

    # join back to block and calculate fields
    arcpy.AddJoin_management(block, "GEOID10", geoid_tbl, "GEOID10")
    expression10 = "(!" + geoid_tbl + ".SUM_ACRE_PP!)"
    expression11 = "(!" + geoid_tbl + ".SUM_PARK_PP!)"
    arcpy.CalculateField_management(block, "ACRE_PP", expression10,
                                    "PYTHON_9.3")
    arcpy.CalculateField_management(block, "PARK_PP", expression11,
                                    "PYTHON_9.3")
    arcpy.RemoveJoin_management(block)

    with arcpy.da.UpdateCursor(block, ["ACRE_PP", "PARK_PP"]) as cursor:
        for row in cursor:
            if row[0] is None:
                row[0] = 0
            if row[1] is None:
                row[1] = 0
            cursor.updateRow(row)
            del row
    del cursor
    return
コード例 #27
0
arcpy.AddField_management("S_roads", 'RTE_NM', 'TEXT', 50)
arcpy.AddField_management("S_roads", 'TRANSPORT_EDGE_FROM_MSR', 'TEXT', 50)
arcpy.AddField_management("S_roads", 'TRANSPORT_EDGE_TO_MSR', 'TEXT', 50)
with arcpy.da.SearchCursor(featureClass, fields) as sSrchCrsr:
    with arcpy.da.InsertCursor('S_roads', fields) as sInsertCrsr:
        for row in sSrchCrsr:
            if ("SB" in str(row[2]) and "SC" in str(row[2])):
                sInsertCrsr.insertRow((row[0], row[1], row[2], row[3], row[4], row[5]))
with arcpy.da.UpdateCursor("S_roads", ["EDGE_RTE_KEY", "Key"]) as sUp:
    for row in sUp:
        row[1] = row[0].replace("SB", "_B")
        sUp.updateRow(row)

print "South Features Extracted"
################################## Begin join for North and South data
arcpy.MakeTableView_management ("N_Roads", "N_RoadsVw")
arcpy.MakeTableView_management ("S_Roads", "S_RoadsVw")
arcpy.AddJoin_management("N_RoadsVw", "Key", "S_RoadsVw", "Key")
arcpy.TableToTable_conversion("N_RoadsVw", wrkspce, finalNSTable)
arcpy.RemoveJoin_management("N_RoadsVw")

print "North and South Tables Joined"
arcpy.Delete_management("N_RoadsVw")
arcpy.Delete_management("S_RoadsVw")
arcpy.Delete_management("S_roads")
arcpy.Delete_management("N_roads")


################################# Begin calc to see if North and South measures match
arcpy.AddField_management(finalNSTable, 'TO_MEAS_MATCH', 'TEXT', '20')
arcpy.AddField_management(finalNSTable, 'FROM_MEAS_MATCH', 'TEXT', '20')
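The comparison itself falls outside this excerpt; one way to populate the two match flags is sketched below, with the joined field names as assumptions (Add Join qualifies fields by their source table names):

flds = ["N_Roads_TRANSPORT_EDGE_TO_MSR", "S_Roads_TRANSPORT_EDGE_FROM_MSR",  # hypothetical names
        "N_Roads_TRANSPORT_EDGE_FROM_MSR", "S_Roads_TRANSPORT_EDGE_TO_MSR",
        "TO_MEAS_MATCH", "FROM_MEAS_MATCH"]
with arcpy.da.UpdateCursor(finalNSTable, flds) as cursor:
    for row in cursor:
        row[4] = "MATCH" if row[0] == row[1] else "NO MATCH"
        row[5] = "MATCH" if row[2] == row[3] else "NO MATCH"
        cursor.updateRow(row)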
コード例 #28
0
# MODIFIED FROM:
'''
Created on Nov 19, 2013
@author: mosteele
'''

import arcpy, os
# TO RUN AS ARCMAP SCRIPT
fc = arcpy.GetParameterAsText(0)
# TO RUN IN OS
# fc = r'C:\Users\todda\Documents\ArcGIS\Default.gdb\Large_parcels_adjacent_pub_l'
fieldsToPrintList = ["OWNER1", "OWNERADDR"]
# large_parcels_union = fc
owner_id = 0
owner_id_field = "Owner_ID"
arcpy.MakeTableView_management(fc, 'tempfcTable')

# outFile = r'C:\Users\todda\Documents\ArcGIS\unique_values_in_owner_address_fields.txt'
owner_list = []
address_list = []
o_count = 0
a_count = 0
# with open(outFile,'w') as w:
fieldList = arcpy.ListFields('tempfcTable')
print "Creating owner list..."
arcpy.AddMessage("Creating owner list...")
for field in fieldList:
    if field.name == fieldsToPrintList[0]:
        values = [
            row[0] for row in arcpy.da.SearchCursor('tempfcTable', field.name)
        ]
コード例 #29
0
def convert(costpath, file_name_1, file_name_2, name_1, name_2):
    try:
        arcpy.RasterToPolyline_conversion(costpath, directory + '\polylines\pl_' + file_name_1 + '_' + file_name_2,
                                          "ZERO", 10, "SIMPLIFY")
        distance = 0
        with arcpy.da.SearchCursor(directory + '\polylines\pl_' + file_name_1 + '_' + file_name_2 + '.shp',
                                   ['SHAPE@LENGTH']) as poly_cursor:
            for row in poly_cursor:
                distance += row[0]  # sum distance for each polyline segment
    except arcpy.ExecuteError:
        error = arcpy.GetMessages(2)
        str_error = str(error)
        if str_error.startswith('ERROR 010151'):
            print('\nCannot convert cost path raster between ' + loc_one_name + ' and ' + loc_two_name +
                  ' to a valid polyline, but rest of data should be saved properly.  Source and destination may be too '
                  'close to each other.')
            print('Linear distance between source and destination set to zero in output table.')
            print(str(error))
            log.write(asctime() + ': Cannot convert cost path raster between ' + loc_one_name + ' and ' + loc_two_name +
                      ' to a valid polyline, but rest of data should be saved properly.\n'
                      + 'Linear distance between source and destination set to zero in output table.\n' + str(error) +
                      '------------------------------------------------------------------------------------------'
                      + '\n')
            distance = 0
        else:
            print('\nCannot convert cost path raster between ' + loc_one_name + ' and ' + loc_two_name +
                  ' to a valid polyline, but rest of data should be saved properly.')
            print('Linear distance between source and destination not calculated.')
            print(str(error))
            log.write(asctime() + ': Cannot convert cost path raster between ' + loc_one_name + ' and ' + loc_two_name +
                      ' to a valid polyline, but rest of data should be saved properly.\n'
                      + 'Linear distance between source and destination not calculated.\n' + str(error) +
                      '------------------------------------------------------------------------------------------'
                      + '\n')
            distance = 'NA'
    except Exception as error:
        print('\nCannot convert cost path raster between ' + loc_one_name + ' and ' + loc_two_name +
              ' to a valid polyline.')
        print('Linear distance between source and destination not calculated.')
        print(str(error))
        log.write(asctime() + ': Cannot convert cost path raster between ' + loc_one_name + ' and ' + loc_two_name +
                  ' to a valid polyline, but rest of data should be saved properly.\n'
                  + 'Linear distance between source and destination not calculated.\n' + str(error) +
                  '------------------------------------------------------------------------------------------'
                  + '\n')
        distance = 0

    try:
        arcpy.AddField_management(costpath, 'Source', 'TEXT')
        arcpy.AddField_management(costpath, 'Destination', 'TEXT')
        arcpy.AddField_management(costpath, 'Linear_Distance', 'FLOAT')
        arcpy.CalculateField_management(costpath, 'Source', "'" + name_1 + "'")
        arcpy.CalculateField_management(costpath, 'Destination', "'" + name_2 + "'")
        arcpy.CalculateField_management(costpath, 'Linear_Distance', distance)
        arcpy.MakeTableView_management(costpath, 'table')
        with arcpy.da.SearchCursor('table', ['SOURCE', 'DESTINATION', 'PATHCOST',
                                             'LINEAR_DISTANCE', 'Rowid']) as table_cursor:
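            # entry[4] is the raster attribute table's Rowid; skip row 0 and copy
            # the remaining rows into the module-level master table (table, fields)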
            in_cursor = arcpy.da.InsertCursor(table, fields)
            for entry in table_cursor:
                if entry[4] != 0:
                    in_cursor.insertRow((str(entry[0]), str(entry[1]), entry[2], entry[3]))
            del in_cursor

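        # When intermediate data is requested, also export the attribute table
        # to .csv and save the cost-path raster itself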
        if int_data is True:
            try:
                arcpy.CopyRows_management(costpath, directory + r'\tables\tb_' + file_name_1 + '_' + file_name_2
                                          + '.csv')
            except Exception as error:
                print('\nFailed to save data for cost path between ' + loc_one_name + ' and ' + loc_two_name
                      + ' in .csv table. See error message for more details.')
                print(str(error))
                log.write(asctime() + ': Failed to save data for cost path between ' + loc_one_name + ' and '
                          + loc_two_name + ' in .csv table. See error message for more details.\n' + str(error) +
                          '------------------------------------------------------------------------------------------'
                          + '\n')

            try:
                costpath.save(directory + r'\costpath\cp_' + file_name_1 + '_' + file_name_2)
            except Exception as error:
                str_error = str(error)
                if str_error.startswith('ERROR 010240'):
                    print('\nCould not save cost path raster cp_' + file_name_1 + '_'
                          + file_name_2 + ', but rest of data should be saved properly.')
                    print('Combination of file names for fc one and fc two likely exceeds 13 characters. '
                          'See help file for more information.')
                    log.write(asctime() + ': Could not save cost path raster cp_' + file_name_1 + '_'
                              + file_name_2 + ', but rest of data should be saved properly.\n'
                              + 'Combination of file names for fc one and fc two likely exceeds 13 characters. '
                                'See help file for more information.\n' + str(error) + '\n'
                              + '----------------------------------------------------'
                                '--------------------------------------'
                              + '\n')
                else:
                    print('\nCould not save cost path raster cp_' + file_name_1 + '_' + file_name_2 +
                          ', but rest of data should be saved properly. See error message for more details.')
                    print(str(error))
                    log.write(asctime() + ': Could not save cost path raster cp_' + file_name_1 + '_' + file_name_2 +
                              ', but rest of data should be saved properly. See error message for more details.\n' +
                              str(error) + '\n' +
                              '-------------------------------------------------------'
                              '-----------------------------------'
                              + '\n')
    except arcpy.ExecuteError:
        error = arcpy.GetMessages(2)
        print('\nFailed to properly save data for least cost path between ' + loc_one_name + ' and ' + loc_two_name +
              ' in master table. Script will continue with next iteration.')
        print(str(error))
        log.write(asctime() + ': Failed to properly save data for least cost path between ' + loc_one_name + ' and '
                  + loc_two_name + ' in master table. Script continued with next iteration.\n' + str(error) +
                  '------------------------------------------------------------------------------------------' + '\n')
    except Exception as error:
        print('\nFailed to properly save data for least cost path between ' + loc_one_name + ' and ' + loc_two_name +
              ' in master table. Script will continue with next iteration.')
        print(str(error))
        log.write(asctime() + ': Failed to properly save data for least cost path between ' + loc_one_name + ' and '
                  + loc_two_name + ' in master table. Script continued with next iteration.\n' + str(error) +
                  '------------------------------------------------------------------------------------------' + '\n')
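
A minimal call-site sketch (assumed; the enclosing loop is not part of this snippet): convert would run once per source/destination pair after the corresponding cost path raster exists, with all five arguments supplied by the surrounding cursor loops.

# Hypothetical invocation; out_cost_path and the loc_* variables are assumed
# to be set by the enclosing loops
convert(out_cost_path, loc_one_filename, loc_two_filename, loc_one_name, loc_two_name)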
Code example #30
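# Snippet purpose: push attribute updates from updateTbl into respvarsFC by
# joining on key fields and recalculating the listed update fields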
import arcpy, os

# Assumed inputs, defined earlier in the source script (e.g., as script tool
# parameters): respvarsFC, updateTbl, fromJoinFld, toJoinFld, updateFlds
rvLyr = "RespVarsLyr"

# ---Functions---
def msg(txt, type="message"):
    print(txt)
    if type == "message":
        arcpy.AddMessage(txt)
    elif type == "warning":
        arcpy.AddWarning(txt)
    elif type == "error":
        arcpy.AddError(txt)

## PROCESSES
# Make a feature layer of the respVarsFC
msg("...creating table view of response variables")
rvLyr = arcpy.MakeTableView_management(respvarsFC, rvLyr)

# Join updateTbl to layer
msg("...joining tables")
rvJoin = arcpy.AddJoin_management(rvLyr, fromJoinFld, updateTbl, toJoinFld)
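# Note: once joined, fields must be referenced as "<table>.<field>", which is
# why the update and value field names below are prefixed with the basenames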

# Loop through the update fields
for fld in updateFlds.split(";"):
    msg("...updating {}".format(fld))
    
    # Get the update and calc field names
    updateFld = os.path.basename(respvarsFC) + "." + fld
    valueFld = os.path.basename(updateTbl) + "." + fld

    # Check that the update field actually exists
    if not arcpy.ListFields(rvLyr, updateFld):