Code example #1
class SearchButtonClass4(object):
    # module-level globals shared with the rest of the add-in
    global emexditems
    global featclasses
    global selattrib
    global ProjectName
    featclasses = emexditems

    def __init__(self):
        self.enabled = True
        self.checked = False

    def onClick(self):
        featclasses = emexditems
        # disable geoprocessing history logging for the selection loop and
        # restore it afterwards, even if a selection fails
        arcpy.SetLogHistory(False)
        try:
            for fc in featclasses:
                try:
                    arcpy.SelectLayerByAttribute_management(
                        fc, "NEW_SELECTION",
                        selattrib + " LIKE '%" + ProjectName + "%'")
                except arcpy.ExecuteError:
                    print(" Skip " + fc)
        finally:
            arcpy.SetLogHistory(True)
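Several examples on this page (#1, #6, #13, #18) bracket their work with manual SetLogHistory(False)/SetLogHistory(True) calls. A small context manager makes the restore automatic even when a tool raises; this is a sketch, assuming arcpy.GetLogHistory() is available (it is in current arcpy releases):

from contextlib import contextmanager

import arcpy


@contextmanager
def log_history(enabled):
    # remember the current setting, apply the requested one, restore on exit
    previous = arcpy.GetLogHistory()
    arcpy.SetLogHistory(enabled)
    try:
        yield
    finally:
        arcpy.SetLogHistory(previous)

# usage:
# with log_history(False):
#     arcpy.SelectLayerByAttribute_management(fc, "NEW_SELECTION", where_clause)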
Code example #2
def field_reorder(self):
    # Reorders fields by copying each one into a temporary *_1 field, deleting
    # the original, then re-adding it in the desired order.
    # Requires: from collections import OrderedDict
    _field_lists = [('building_id', '建筑ID'), ('type', '类型'), ('floor', '楼层'), ('floor_z', '层高'), ('status', '状态'),
                    ('fire_area', '防火分区'), ('function_type', '功能类型'), ('door_aspect', '门_方向'),
                    ('area_type', '分区类型'), ('name', '建筑名称'), ('z', '显示高度'), ('floor_d', '层差'), ('city', '市'),
                    ('province', '省')]
    _field_order_lists = OrderedDict(_field_lists)  # OrderedDict keeps keys in the order they were supplied
    # Field Copy
    for _key in _field_order_lists.keys():
        for fld in arcpy.ListFields(self._feature):
            if fld.name == _key:
                _field_new = _key + '_1'
                arcpy.AddField_management(self._feature, _field_new, fld.type, fld.precision, fld.scale, fld.length)
                expression = '!{}!'.format(_key)  # a field name string must be wrapped as !field! for a calculation expression
                arcpy.CalculateField_management(self._feature, _field_new, expression, "PYTHON_9.3")
                arcpy.DeleteField_management(self._feature, _key)
    # Field Reorder
    for _key in _field_order_lists.keys():
        for fld in arcpy.ListFields(self._feature):
            if fld.name == _key + '_1':
                arcpy.AddField_management(self._feature, _key, fld.type, fld.precision, fld.scale, fld.length,
                                          _field_order_lists[_key])
                expression = '!{}!'.format(fld.name)
                arcpy.CalculateField_management(self._feature, _key, expression, "PYTHON_9.3")
                arcpy.DeleteField_management(self._feature, fld.name)
    arcpy.SetLogHistory(True)
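The copy-then-recreate loop above works around arcpy's lack of a field-reorder tool by round-tripping every field through a temporary *_1 copy. An alternative sketch writes a reordered copy of the data via field mappings; in_fc, out_path, out_name, and ordered_names are hypothetical inputs, not part of the original tool:

import arcpy


def reordered_copy(in_fc, out_path, out_name, ordered_names):
    # the order of FieldMap objects in the FieldMappings dictates output field order
    mappings = arcpy.FieldMappings()
    for name in ordered_names:
        fmap = arcpy.FieldMap()
        fmap.addInputField(in_fc, name)
        mappings.addFieldMap(fmap)
    return arcpy.FeatureClassToFeatureClass_conversion(
        in_fc, out_path, out_name, field_mapping=mappings)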
Code example #3
def setup_arcpy_environment():
    '''Process: Define some miscellaneous arcpy environment settings.'''
    arcpy.env.overwriteOutput = True
    arcpy.SetLogHistory(False)
    arcpy.env.pyramid = "NONE"
    arcpy.env.rasterStatistics = "NONE"
    arcpy.env.XYResolution = "0.0005 METERS"
    arcpy.env.XYTolerance = "0.001 METERS"
    arcpy.env.outputCoordinateSystem = 2927  # EPSG 2927: NAD83(HARN) / Washington South (US feet)
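If these settings should only apply to one block of work, recent arcpy releases (ArcGIS Pro) also offer arcpy.EnvManager, which restores the previous values on exit; a minimal sketch:

import arcpy

with arcpy.EnvManager(overwriteOutput=True, XYTolerance="0.001 METERS"):
    pass  # geoprocessing here; both settings revert when the block exits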
Code example #4
    def __init__(self):
        """Define the toolbox (the name of the toolbox is the name of the
        .pyt file)."""
        self.label = "Toolbox"
        self.alias = ""

        # List of tool classes associated with this toolbox
        self.tools = [CutTool, OverlapReport, UpdateOverlapReportConfig]
        arcpy.SetLogHistory(False)
        arcpy.env.overwriteOutput = True
Code example #5
def getTrainLocations(gdbws, fcdatasetname, trainTableName):
    """
    Refresh Delay_Railroad_Metro and Delay_Station_Metro with the latest
    train locations, read from a CSV already converted from the public
    transit API's train-location feed.
    Arg 1: full path to challenge_db.gdb
    Arg 2: feature dataset name
    Arg 3: CSV file of current train delays
    """
    logging.debug('--arcgis processing:getTrainLocations')

    # check out the Network Analyst extension license
    arcpy.CheckOutExtension("Network")

    # Tip: repeated runs (e.g. batch jobs) slow down over time, so turn off the history log
    arcpy.SetLogHistory(False)

    train_ws = os.path.dirname(gdbws)
    arcpy.env.workspace = train_ws

    # operator whose lines are extracted
    operator_name = 'odpt.Operator:TokyoMetro'

    # delay threshold for extraction
    delayTime = 0

    # for testing
    #q_wh ="odpt_delay >= 0 AND odpt_operator = 'odpt.Operator:TokyoMetro' AND odpt_tostation IS NOT NULL AND odpt_fromstation IS NOT NULL"
    #q_wh = "odpt_delay >= {0} AND odpt_operator = '{1}' AND odpt_tostation IS NOT NULL AND odpt_fromstation IS NOT NULL".format(delayTime, operator_name)

    # production query (rows where both FromStation and ToStation are present)
    q_wh = "odpt_delay > {0} AND odpt_operator = '{1}' AND odpt_tostation IS NOT NULL AND odpt_fromstation IS NOT NULL".format(delayTime, operator_name)

    fields = ['dc_date','odpt_delay','odpt_railway','odpt_fromstation','odpt_tostation','odpt_operator','odpt_trainnumber']

    try:
        with arcpy.da.SearchCursor(trainTableName, fields, q_wh) as cursor:
            # batch speed-up: hand the cursor to the network-analysis function
            trainLocationAnalBatch(gdbws, fcdatasetname, cursor)
    except arcpy.ExecuteError as e:
        # geoprocessing error
        logging.debug(str(e).decode('utf-8'))
    except Exception as e:
        # Python error
        logging.debug(str(e).decode('utf-8'))
    finally:
        # clean up the input file
        arcpy.Delete_management(trainTableName)
Code example #6
class SearchComboBoxClass3(object):
    # module-level globals shared with the rest of the add-in
    global emexditems
    global featclasses
    global selattrib
    global ProjectName
    featclasses = emexditems

    def __init__(self):
        self.items = []
        self.editable = True
        self.enabled = True
        self.dropdownWidth = 'WWWWWWW'
        self.width = 'WWWWWWW'

    def onSelChange(self, selection):
        pass

    def onEditChange(self, text):
        global ProjectName
        ProjectName = text

    def onFocus(self, focused):
        pass

    def onEnter(self):
        featclasses = emexditems
        # disable geoprocessing history logging for the selection loop and
        # restore it afterwards, even if a selection fails
        arcpy.SetLogHistory(False)
        try:
            for fc in featclasses:
                try:
                    arcpy.SelectLayerByAttribute_management(
                        fc, "NEW_SELECTION",
                        selattrib + " LIKE '%" + ProjectName + "%'")
                except arcpy.ExecuteError:
                    print(" Skip " + fc)
        finally:
            arcpy.SetLogHistory(True)

    def refresh(self):
        pass
Code example #7
def __init__(self, inputfc, outputfc):
    arcpy.SetLogHistory(False)
    arcpy.env.overwriteOutput = True
    self.inputfc = inputfc
    self.inputtype = arcpy.Describe(inputfc).dataType
    self.geomtype = arcpy.Describe(inputfc).shapeType
    self.outputfc = outputfc
    self.sr = arcpy.SpatialReference(4612)  # EPSG 4612: JGD 2000 (geographic)
    arcpy.CreateFeatureclass_management(os.path.dirname(self.outputfc),
                                        os.path.basename(self.outputfc),
                                        self.geomtype,
                                        self.inputfc,
                                        spatial_reference=self.sr)
Code example #8
def setup_arcpy_environment():
    '''Process: Define some miscellaneous arcpy environment settings.'''
    try:
        import arcpy
        arcpy.env.overwriteOutput = True
        arcpy.SetLogHistory(False)
        arcpy.env.pyramid = "NONE"
        arcpy.env.rasterStatistics = "NONE"
        arcpy.env.XYResolution = "0.0005 METERS"
        arcpy.env.XYTolerance = "0.001 METERS"
        arcpy.env.outputCoordinateSystem = 2927  # EPSG 2927: NAD83(HARN) / Washington South (US feet)
    except ImportError:
        print("The module arcpy does not seem to be available. "
              "Are you working in a virtual environment?")
Code example #9
def generate_data(values):
    # requires: from queue import Empty  (from Queue import Empty on Python 2)
    arcpy.env.overwriteOutput = True
    arcpy.SetLogHistory(False)

    queue_item, lock_item = values
    results = []

    try:
        while True:
            item = queue_item.get_nowait()
            scenario = item.split("\\")[0]
            print_message(lock_item, "Starting {}".format(scenario))
    except Empty:  # get_nowait() raises queue.Empty once the work queue is drained
        return results
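generate_data is written as a multiprocessing worker that drains a shared work queue. A hedged sketch of a driver for it; the Manager-based queue/lock setup and the name run_workers are assumptions about the surrounding project, not taken from it:

import multiprocessing


def run_workers(items, worker, processes=4):
    manager = multiprocessing.Manager()
    queue_item = manager.Queue()
    lock_item = manager.Lock()
    for item in items:
        queue_item.put(item)
    # every process receives the same (queue, lock) pair and drains the queue
    with multiprocessing.Pool(processes) as pool:
        return pool.map(worker, [(queue_item, lock_item)] * processes)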
Code example #10
File: SysGISTools.py  Project: bibeputt/GISPython
    def __init__(self, ToolName, Params, licenceLevel="arceditor"):
        """Class initialization procedure

        Args:
            self: The reserved object 'self'
            ToolName: Name of the tool (used for output)
            Params: parameter object
            licenceLevel: arcinfo, arceditor, arcview, arcserver, arcenginegeodb, or arcengine
        """
        self.ExecutePatch = os.path.dirname(os.path.realpath(__file__))

        if licenceLevel == "arcinfo":
            print(u'...Using licence level : ' + licenceLevel)
            import arcinfo
        elif licenceLevel == "arceditor":
            print(u'...Using licence level : ' + licenceLevel)
            import arceditor
        elif licenceLevel == "arcview":
            print(u'...Using licence level : ' + licenceLevel)
            import arcview
        elif licenceLevel == "arcserver":
            print(u'...Using licence level : ' + licenceLevel)
            import arcserver
        elif licenceLevel == "arcenginegeodb":
            print(u'...Using licence level : ' + licenceLevel)
            import arcenginegeodb
        elif licenceLevel == "arcengine":
            print(u'...Using licence level : ' + licenceLevel)
            import arcengine
        else:
            print(u'...Incorrect licence supplied - using: arceditor')
            import arceditor

        import arcpy
        self.Pr = Params
        LogDir = self.Pr.ErrorLogDir
        LogDirArh = self.Pr.ErrorLogDirArh
        OutDir = self.Pr.OutDir
        OutDirArh = self.Pr.OutDirArh
        self.StartTime = datetime.datetime.now()
        self.gp = arcpy
        if hasattr(Params, 'SetLogHistory'):
            arcpy.SetLogHistory(Params.SetLogHistory)
        else:
            arcpy.SetLogHistory(False)
        self.GDBHelper = GDBHelper.GDBHelper(self.gp)
        self.ToolName = ToolName
        self.SR = MySR()
        self.OutputStr = u''
        self.OutputErrStr = u''
        self.State = "Started"
        try:
            self.AchiveFiles(LogDir, LogDirArh, ToolName, False)
        except Exception as err:
            print(u'Error archiving errlog files')
            if hasattr(err, 'strerror'):
                print(err.strerror)
            else:
                print('{}'.format(err))
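The licence ladder above repeats the same print/import pair six times; importlib can collapse it. A behaviorally similar sketch (bind_licence is a hypothetical helper name, not from GISPython):

import importlib

LICENCE_MODULES = ("arcinfo", "arceditor", "arcview", "arcserver",
                   "arcenginegeodb", "arcengine")


def bind_licence(licence_level="arceditor"):
    # fall back to arceditor for unknown values, mirroring the original chain
    if licence_level not in LICENCE_MODULES:
        print(u'...Incorrect licence supplied - using: arceditor')
        licence_level = "arceditor"
    print(u'...Using licence level : ' + licence_level)
    importlib.import_module(licence_level)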
Code example #11
sr_mol = arcpy.SpatialReference(
    'Mollweide (world)')  # projected Mollweide (54009)
sr_gcs = arcpy.SpatialReference(
    'WGS 1984')  # geographic coordinate system WGS84 (4326)

# environment
os.chdir(wd)
if not os.path.exists('tmp'): os.makedirs('tmp')
if not os.path.exists(dir_tmp): os.makedirs(dir_tmp)
if not arcpy.Exists(gdb):
    print('creating gdb')
    arcpy.CreateFileGDB_management(os.path.dirname(gdb), os.path.basename(gdb))
arcpy.env.overwriteOutput = True
arcpy.env.workspace = gdb
# history log: C:\Users\visitor\AppData\Roaming\ESRI\Desktop10.2\ArcToolbox\History
arcpy.SetLogHistory(True)

# copy shapefiles to local gdb
arcpy.env.outputCoordinateSystem = sr_gcs
shps_rgn = ['rgn_gcs'] + ['rgn_%s_gcs' % b for b in buffers]
for fc_rgn in [fc_gadm] + ['%s\\%s.shp' % (dir_rgn, x) for x in shps_rgn]:
    fc_gdb = os.path.splitext(os.path.basename(fc_rgn))[0]
    if not arcpy.Exists(fc_gdb):
        print('copying ' + fc_gdb)
        arcpy.CopyFeatures_management(fc_rgn, fc_gdb)

    # convert gcs to mol
    fc_mol = fc_gdb.replace('_gcs', '_mol')
    if not arcpy.Exists(fc_mol):
        print('projecting ' + fc_mol)
Code example #12
#  * go through slivers of FAO and holes from earth box erased by the rest and manually associate with legit region
#  * convert EEZ of Black Sea to eez-inland
#  * merge all products and perform checks for overlap and geometry repair
#
# Built using:
#  ArcGIS 10.2.1
#  Python Data Analysis Library (pandas) installed with easy_install
#
# Changes since OHI 2013
# * New EEZ splits:
#   - 140 Guadeloupe and Martinique
#   - 116 Puerto Rico and Virgin Islands of the United States

import arcpy, os, re, numpy as np, socket, pandas, time
from collections import Counter
arcpy.SetLogHistory(True) # %USERPROFILE%\AppData\Roaming\ESRI\Desktop10.2\ArcToolbox\History

# configuration based on machine name
conf = {
    'Amphitrite':
    {'dir_git'    :'G:/ohiprep',
     'dir_neptune':'N:',
     'dir_tmp'    :'C:/tmp',
     }}[socket.gethostname()]

# paths
nm      = 'NCEAS-Regions_v2014'                                      # name of data product
td      = '{0}/Global/{1}'.format(conf['dir_tmp'], nm)               # temp directory on local filesystem
gdb     = '{0}/geodb.gdb'.format(td)                                 # file geodatabase
ad      = '{0}/git-annex/Global/{1}'.format(conf['dir_neptune'], nm) # git annex directory on neptune
gd      = '{0}/Global/{1}'.format(conf['dir_git'], nm)               # git directory on local filesystem
Code example #13
def polygons_in_zones(zone_fc, zone_field, polygons_of_interest, output_table,
                      interest_selection_expr):
    old_workspace = arcpy.env.workspace
    arcpy.env.workspace = 'in_memory'
    arcpy.SetLogHistory(False)
    arcpy.env.outputCoordinateSystem = arcpy.SpatialReference(102039)
    selected_polys = 'selected_polys'
    # allow overwriting a leftover copy (a quirk of the ArcGIS interactive Python window)
    if arcpy.Exists(selected_polys):
        arcpy.env.overwriteOutput = True

    arcpy.AddMessage('Copying/selecting polygon features...')
    if interest_selection_expr:
        arcpy.Select_analysis(polygons_of_interest, selected_polys,
                              interest_selection_expr)
    else:
        arcpy.CopyFeatures_management(polygons_of_interest, selected_polys)

    # use tabulate intersection for the areas overlapping
    arcpy.AddMessage('Tabulating intersection between zones and polygons...')
    tab_table = arcpy.TabulateIntersection_analysis(
        zone_fc, zone_field, selected_polys, 'tabulate_intersection_table')

    # area was calculated in map units which was m2 so convert to hectares
    arcpy.AddField_management(tab_table, 'Poly_Ha', 'DOUBLE')
    arcpy.CalculateField_management(tab_table, 'Poly_Ha', '!AREA!/10000',
                                    'PYTHON')

    # just change the name of the percent field
    arcpy.AlterField_management(tab_table, 'PERCENTAGE', 'Poly_Pct')

    # Now just get the count as there is no other area metric anymore
    spjoin_fc = arcpy.SpatialJoin_analysis(zone_fc, selected_polys,
                                           'spatial_join_output')
    arcpy.AlterField_management(spjoin_fc, 'Join_Count', 'Poly_n')

    # Add the density
    arcpy.AddField_management(spjoin_fc, 'Poly_nperha', 'DOUBLE')
    arcpy.CalculateField_management(spjoin_fc, 'Poly_nperha',
                                    '!Poly_n!/!shape.area@hectares!', 'PYTHON')

    arcpy.AddMessage('Refining output...')
    arcpy.JoinField_management(tab_table, zone_field, spjoin_fc, zone_field,
                               ["Poly_n", 'Poly_nperha'])
    final_fields = ['Poly_Ha', 'Poly_Pct', 'Poly_n', 'Poly_nperha']

    # make output nice
    arcpy.env.overwriteOutput = False
    cu.one_in_one_out(tab_table, final_fields, zone_fc, zone_field,
                      output_table)

    cu.redefine_nulls(output_table, final_fields, [0, 0, 0, 0])

    # clean up
    # can't delete all of in_memory because this function is meant to be called from another one that uses in_memory
    for item in [selected_polys, tab_table, spjoin_fc]:
        arcpy.Delete_management(item)
    arcpy.env.workspace = old_workspace

    arcpy.AddMessage('Polygons in zones tool complete.')
    arcpy.SetLogHistory(True)
Code example #14
File: nasa.py  Project: westkey/M-SRM
def Process_TRMM(Package,Basin,Year,zoneField):

    # assembling the list of file addresses 
    TRMM_path= Package + '/Datos/NASA_Datos/TRMM/' + Year
    inZoneData = Package + '/Datos/Cuencas/' + Basin + '/Parametros/Shapefile/HighShape.tif'
    OutputTRMM = Package + '/Datos/Cuencas/' +Basin + '/Datos_Intermedia/TRMM/TRMM_Precip' + Year +'.dbf'
    OutputTRMMpath = Package + '/Datos/Cuencas/' +Basin + '/Datos_Intermedia/TRMM'

    #import modules and manage temporary folders
    import arcpy, arcgisscripting, sys, os, csv, string, shutil
    from arcpy import env
    from arcpy.sa import *
    try:
        from dbfpy import dbf
    except ImportError:
        print('You do not have "dbfpy" installed, Process_TRMM cannot run!')
        return

    arcpy.env.overwriteOutput = True
    arcpy.SetLogHistory(True)
    arcpy.CheckOutExtension("Spatial")
    
    IntermediateOutput = TRMM_path + '/temp'

    # make sure the TRMM output path is there
    if not os.path.exists(OutputTRMMpath):
        os.makedirs(OutputTRMMpath)

    # If a temp data folder already exists, delete its contents and recreate it. 
    if os.path.exists(IntermediateOutput):
        shutil.rmtree(IntermediateOutput)
        os.makedirs(IntermediateOutput)
    else:

    ###### If no temporary folder exists, NetCDFs are converted to tiffs for the first time.
        os.makedirs(IntermediateOutput)
        arcpy.env.workspace = TRMM_path
        arcpy.env.overwriteOutput = True

        NCfiles = arcpy.ListFiles("*.nc")

        for filename in NCfiles:
            print('Process_TRMM: Converting netCDF file ' + filename + ' to Raster')
            inNCfiles = arcpy.env.workspace + "/" + filename
            fileroot = filename[0:(len(filename)-3)]
            outRasterLayer = TRMM_path + "/" + fileroot
            arcpy.MakeNetCDFRasterLayer_md(inNCfiles, "r", "longitude", "latitude", "r", "", "", "BY_VALUE")
            arcpy.CopyRaster_management("r", outRasterLayer + ".tif", "", "", "", "NONE", "NONE", "")

        print('Process_TRMM: Finished creating TIFs!')
    #######

    # Execute zonal statistics functions on selected basin and store output as a dbf.
    arcpy.env.workspace = TRMM_path
    TIFfiles = arcpy.ListFiles("*.tif")
    
    try:
        for filename in TIFfiles:
            fileroot = filename[0:(len(filename)-4)]
            print("Process_TRMM: Calculating zonal statistics on " + filename)
            inValueRaster = TRMM_path + '/' + filename
            arcpy.CheckOutExtension("Spatial")
            tempout = IntermediateOutput + '/tempdbf.dbf'
            outZstat = ZonalStatisticsAsTable(inZoneData, zoneField, inValueRaster, tempout, "DATA", "ALL")

            # the tool chokes on some output names (the authors suspect the "."
            # in the filename), so save the dbf under a static name, then rename it here.
            outTable = IntermediateOutput + "/" + fileroot + '.dbf'
            os.rename(tempout, outTable)

        print('Process_TRMM: Finished calculating zonal statistics!')

    except Exception:
        print('Error: Process_TRMM: Error encountered while calculating zonal statistics!')

    # Create csvs from DBFs
    arcpy.env.workspace = IntermediateOutput
    DBFfiles = arcpy.ListFiles("*.dbf")

    try:
        for filename in DBFfiles:
            fileroot = filename[0:(len(filename)-4)]

            # replace the '.' with '_', because the merge function is very finicky with input names.
            csv_fn = IntermediateOutput + '/' + fileroot.replace('.', '_') + '.csv'

            inDBFfiles = arcpy.env.workspace + '/' + filename
            with open(csv_fn, 'wb') as csvfile:
                in_db = dbf.Dbf(inDBFfiles)
                out_csv = csv.writer(csvfile)
                names = []
                for field in in_db.header.fields:
                    names.append(field.name)
                out_csv.writerow(names)
                for rec in in_db:
                    out_csv.writerow(rec.fieldData)
                in_db.close()

        print('Process_TRMM: Finished conversion to CSVs!')
    except Exception:
        print('Error: Process_TRMM: Error encountered while creating CSVs from dbfs!')

        
    # Merge CSVs together and report progress.
    arcpy.env.workspace = IntermediateOutput
    CSVfiles = arcpy.ListFiles("*.csv")

    print('Process_TRMM: Creating Output file at ' + OutputTRMM)
    print('Process_TRMM: This may take up to 15 minutes')
    arcpy.Merge_management(CSVfiles, OutputTRMM)

    print('Process_TRMM: Output file created for year ' + Year)
Code example #15
def makeServiceAreas(outGDB,
                     accFeat,
                     costRastLoc,
                     costRastHwy,
                     rampPts,
                     rampPtsID,
                     grpFld=None,
                     maxCost=None,
                     attFld=None):

    # Checks on attFld
    if attFld:
        if not maxCost:
            print(
                'Must specify a `maxCost` value if using `attFld`, exiting...')
            return
        if isinstance(attFld, str) and attFld not in [
                a.name for a in arcpy.ListFields(accFeat)]:
            print('Field ' + attFld +
                  ' not found in access features, exiting...')
            return

    arcpy.env.snapRaster = costRastLoc
    arcpy.env.cellSize = costRastLoc
    arcpy.env.extent = costRastLoc
    arcpy.env.outputCoordinateSystem = costRastLoc

    make_gdb(outGDB)
    arcpy.env.workspace = outGDB
    arcpy.SetLogHistory(False)

    # copy access points to gdb
    accFeat = arcpy.CopyFeatures_management(accFeat, 'accFeat_orig')
    if not grpFld:
        # add a field to assign all rows to one group.
        grpFld = 'serviceArea_group'
        arcpy.CalculateField_management(accFeat,
                                        grpFld,
                                        "1",
                                        field_type="SHORT")
    grps = unique_values(accFeat, grpFld)

    # assign max costs
    if maxCost:
        if isinstance(maxCost, str):
            arcpy.CalculateField_management(accFeat,
                                            'minutes_SA',
                                            '!' + maxCost + '!',
                                            field_type="FLOAT")
        else:
            arcpy.CalculateField_management(accFeat,
                                            'minutes_SA',
                                            maxCost,
                                            'PYTHON',
                                            field_type="FLOAT")
        # dictionary: grps: minutes
        grp_min = {
            a[0]: a[1]
            for a in arcpy.da.SearchCursor(accFeat, [grpFld, 'minutes_SA'])
        }

    for i in grps:
        n = grps.index(i) + 1
        if isinstance(i, str):
            rastout = "grp_" + i + "_servArea"
            cdpts = "grp_" + i + "_inputFeat"
            i_q = "'" + i + "'"
        else:
            rastout = "grp_" + str(int(i)) + "_servArea"
            cdpts = "grp_" + str(int(i)) + "_inputFeat"
            i_q = i
        if arcpy.Exists(rastout):
            # skip already existing
            continue

        print("working on group " + str(i) + " (" + str(n) + " of " +
              str(len(grps)) + ")...")
        arcpy.env.extent = costRastLoc  # reset extent prior to every run
        t0 = time.time()
        c = 1  # counter

        arcpy.Select_analysis(accFeat, cdpts, grpFld + " = " + str(i_q))
        print('Number of access pts: ' + arcpy.GetCount_management(cdpts)[0])

        # get service area in minutes
        if maxCost is not None:
            grpMaxCost = grp_min[i]
            # Make buffer to set a smaller extent, to reduce processing time.
            # buffer at straight-line distance for ~60 mph travel (1 mile per minute)
            buffd = str(int(grpMaxCost * 1609)) + ' METERS'
            print('Cost in minutes: ' + str(grpMaxCost))
            arcpy.Buffer_analysis(cdpts, "buffext", buffd)
            arcpy.env.extent = "buffext"
        else:
            grpMaxCost = None

        # local CD
        cd1 = arcpy.sa.CostDistance(cdpts, costRastLoc, grpMaxCost)
        nm = "cd" + str(c)
        cd1.save(nm)
        cds = [nm]

        # values to ramps
        rp1 = arcpy.sa.ExtractValuesToPoints(rampPts, cd1, "rp1", "NONE",
                                             "VALUE_ONLY")
        rp1s = arcpy.MakeFeatureLayer_management(
            rp1, where_clause="RASTERVALU IS NOT NULL")

        if int(arcpy.GetCount_management(rp1s)[0]) == 0:
            # No ramps reached: just output local roads only service area
            if attFld is not None:
                if isinstance(attFld, str):
                    areaval = unique_values(cdpts, attFld)[0]
                    area = arcpy.sa.Con("cd1", areaval, "",
                                        "Value <= " + str(grpMaxCost))
                    area.save(rastout)
                elif isinstance(attFld, int):
                    area = arcpy.sa.Con("cd1", attFld, "",
                                        "Value <= " + str(grpMaxCost))
                    area.save(rastout)
            else:
                cd1.save(rastout)
        else:
            # Some ramps reached: Run highways/local loop until there is no improvement in travel time.
            notin = [1]
            while len(notin) != 0:
                print('Limited-access cost distance run # ' +
                      str(int((c + 1) / 2)) + '...')
                arcpy.CopyFeatures_management(rp1s, "rp1s")

                # highway CD
                cd2 = arcpy.sa.CostDistance("rp1s",
                                            costRastHwy,
                                            grpMaxCost,
                                            source_start_cost="RASTERVALU")
                c += 1
                nm = "cd" + str(c)
                cd2.save(nm)
                cds = cds + [nm]

                rp2 = arcpy.sa.ExtractValuesToPoints(rampPts, cd2, "rp2",
                                                     "NONE", "VALUE_ONLY")
                # change name to avoid confusion with local ramp points
                arcpy.AlterField_management(rp2,
                                            "RASTERVALU",
                                            "costLAH",
                                            clear_field_alias=True)
                rp2s = arcpy.MakeFeatureLayer_management(
                    rp2, where_clause="costLAH IS NOT NULL")

                # Check for new ramps or ramps reached at least one minute faster after latest run (LAH)
                notin = []
                lahr = {
                    a[0]: a[1]
                    for a in arcpy.da.SearchCursor(rp2s,
                                                   [rampPtsID, 'costLAH'])
                }
                locr = {
                    a[0]: a[1]
                    for a in arcpy.da.SearchCursor('rp1s',
                                                   [rampPtsID, 'RASTERVALU'])
                }
                for a in lahr:
                    if a not in locr:
                        notin.append(a)
                    else:
                        if lahr[a] - locr[a] < -1:
                            notin.append(a)
                if len(notin) == 0:
                    print('No new ramps reached after LAH, moving on...')
                    break

                # back to local
                arcpy.CopyFeatures_management(rp2s, "rp2s")
                cd3 = arcpy.sa.CostDistance("rp2s",
                                            costRastLoc,
                                            grpMaxCost,
                                            source_start_cost="costLAH")
                c += 1
                nm = "cd" + str(c)
                cd3.save(nm)
                cds = cds + [nm]

                rp1 = arcpy.sa.ExtractValuesToPoints(rampPts, cd3, "rp1",
                                                     "NONE", "VALUE_ONLY")
                rp1s = arcpy.MakeFeatureLayer_management(
                    rp1, where_clause="RASTERVALU IS NOT NULL")

                # Check for new ramps or ramps reached at least one minute faster after latest run (Local)
                # Similar to process earlier, but with names reversed
                notin = []
                locr = {
                    a[0]: a[1]
                    for a in arcpy.da.SearchCursor(rp1s,
                                                   [rampPtsID, 'RASTERVALU'])
                }
                lahr = {
                    a[0]: a[1]
                    for a in arcpy.da.SearchCursor('rp2s',
                                                   [rampPtsID, 'costLAH'])
                }
                for a in locr:
                    if a not in lahr:
                        notin.append(a)
                    else:
                        if locr[a] - lahr[a] < -1:
                            notin.append(a)
                # end while loop

            if attFld is not None:
                if isinstance(attFld, str):
                    # cell statistics
                    areaval = round(unique_values(cdpts, attFld)[0], 3)
                    area = arcpy.sa.Con(
                        arcpy.sa.CellStatistics(cds, "MINIMUM", "DATA"),
                        areaval, "", "Value <= " + str(grpMaxCost))
                    area.save(rastout)
                elif isinstance(attFld, int):
                    area = arcpy.sa.Con(
                        arcpy.sa.CellStatistics(cds, "MINIMUM", "DATA"),
                        attFld, "", "Value <= " + str(grpMaxCost))
                    area.save(rastout)
            else:
                arcpy.sa.CellStatistics(cds, "MINIMUM", "DATA").save(rastout)

        print("Done with group: " + str(i))
        t1 = time.time()
        print('That took ' + str(int(t1 - t0)) + ' seconds.')

        # garbage pickup every 10 runs, last run
        if n == round(n, -1) or n == len(grps):
            print("Deleting files...")
            r = arcpy.ListRasters("cd*")
            fc = arcpy.ListFeatureClasses("rp*")
            fc.append("buffext")
            garbagePickup(r)
            garbagePickup(fc)

    # reset extent
    arcpy.env.extent = costRastLoc

    arcpy.BuildPyramids_management(rastout)
    return rastout


# General usage

# # Environment settings
# arcpy.env.parallelProcessingFactor = "100%"  # Adjust to some percent (e.g. 100%) for large extent analyses (e.g. maxCost = None)
# arcpy.env.mask = r'L:\David\projects\RCL_processing\RCL_processing.gdb\VA_Buff50mi_wgs84'
# arcpy.env.overwriteOutput = True
#
# # Cost surface variables
# costRastLoc = r'E:\RCL_cost_surfaces\Tiger_2019\cost_surfaces.gdb\costSurf_no_lah'
# costRastHwy = r'E:\RCL_cost_surfaces\Tiger_2019\cost_surfaces.gdb\costSurf_only_lah'
# rampPts = r'E:\RCL_cost_surfaces\Tiger_2019\cost_surfaces.gdb\rmpt_final'
# rampPtsID = 'UniqueID'  # unique ramp segment ID attribute field, since some ramps have multiple points
#
# # Facilities features and settings
# accFeat = r'accessFeatures'
# outGDB = r'serviceAreas.gdb'
# # Attributes
# grpFld = 'facil_code'
# maxCost = 30
# attFld = None
# makeServiceAreas(outGDB, accFeat, costRastLoc, costRastHwy, rampPts, rampPtsID, grpFld, maxCost, attFld)
Code example #16
# try:
#     break
# except Exception as e:
#     logger.exception('Unhandled Exception')

start = time.time()
Start = datetime.datetime.now()
msgFolder = "C:/logs/"
sender = "*****@*****.**"
recipient = "*****@*****.**"

fc = r"C:\MYLATesting.gdb\MYLA311"  # raw string keeps the backslashes literal
if arcpy.Exists(fc):
    arcpy.Delete_management(fc)

arcpy.SetLogHistory(True)

f2 = open(r'C:\Users\Administrator\Desktop\DetailView.json', 'r')  # raw string: \U would otherwise start a unicode escape
data2 = jsonpickle.encode(jsonpickle.decode(f2.read()))

url2 = "https://myla311.lacity.org/myla311router/mylasrbe/1/QuerySR"

headers2 = {'Content-type': 'text/plain', 'Accept': '/'}

r2 = requests.post(url2, data=data2, headers=headers2)
decoded2 = json.loads(r2.text)

Start = datetime.datetime.now()

# print Start
Code example #17
File: managers.py  Project: LGDC/ArcProc
import logging
from contextlib import ContextDecorator
from datetime import datetime as _datetime
from pathlib import Path
from typing import TypeVar

import arcpy

from arcproc import dataset
from arcproc import features
from arcproc.helpers import elapsed, log_entity_states, slugify, unique_path
from arcproc.metadata import Field, SpatialReferenceSourceItem

LOG: logging.Logger = logging.getLogger(__name__)
"""Module-level logger."""

# Py3.7: Can replace usage with `typing.Self` in Py3.11.
TProcedure = TypeVar("TProcedure", bound="Procedure")
"""Type variable to enable method return of self on Procedure."""

arcpy.SetLogHistory(False)


class Procedure(ContextDecorator):
    """Manager for a single Arc-style procedure."""

    keep_transforms: bool = False
    """Preserve transformation datasets if True."""
    name: str = "Unnamed Procedure"
    """Procedure name."""
    time_started: _datetime
    """Timestamp for when procedure started."""
    transform_path: Path = None
    """Path to current transformation dataset."""
    workspace_path: Path = "memory"
    """Path to workspace for transformation datasets."""
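Procedure subclasses ContextDecorator, so an instance can wrap work either as a context manager or as a decorator. The pattern in miniature; StopWatch is an illustrative stand-in, not part of ArcProc:

import time
from contextlib import ContextDecorator


class StopWatch(ContextDecorator):
    """Usable both as `with StopWatch():` and as `@StopWatch()`."""

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *exc):
        print('elapsed: {:.2f}s'.format(time.time() - self.start))
        return False  # do not suppress exceptions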
Code example #18
def handle_overlaps(zone_fc,
                    zone_field,
                    in_value_raster,
                    out_table,
                    is_thematic,
                    unflat_table='',
                    rename_tag='',
                    units='',
                    debug_mode=False):
    orig_env = env.workspace
    if debug_mode:
        env.overwriteOutput = True
        temp_gdb = cu.create_temp_GDB('zonal_tabarea')
        env.workspace = temp_gdb
        arcpy.AddMessage('Debugging workspace located at {}'.format(temp_gdb))
    else:
        env.workspace = 'in_memory'
    arcpy.SetLogHistory(False)
    arcpy.CheckOutExtension("Spatial")

    def stats_area_table(zone_fc=zone_fc,
                         zone_field=zone_field,
                         in_value_raster=in_value_raster,
                         out_table=out_table,
                         is_thematic=is_thematic):
        def refine_zonal_output(t):
            """Makes a nicer output for this tool. Rename some fields, drop unwanted
                ones, calculate percentages using raster AREA before deleting that
                field."""
            if is_thematic:
                value_fields = arcpy.ListFields(t, "VALUE*")
                pct_fields = [
                    '{}_pct'.format(f.name) for f in value_fields
                ]  # VALUE_41_pct, etc. Field can't start with number.

                # add all the new fields needed
                for f, pct_field in zip(value_fields, pct_fields):
                    arcpy.AddField_management(t, pct_field, f.type)

                # calculate the percents
                cursor_fields = ['AREA'] + [f.name
                                            for f in value_fields] + pct_fields
                with arcpy.da.UpdateCursor(t, cursor_fields) as uCursor:
                    for uRow in uCursor:
                        # unpack AREA plus the value and pct slices, however many fields there are
                        vf_i_end = len(value_fields) + 1
                        pf_i_end = vf_i_end + len(pct_fields)

                        # pct_values are still null at this point; unpack for clarity
                        area, value_values, pct_values = (
                            uRow[0], uRow[1:vf_i_end], uRow[vf_i_end:pf_i_end])
                        new_pct_values = [100 * vv / area for vv in value_values]
                        new_row = [area] + value_values + new_pct_values
                        uCursor.updateRow(new_row)

                for vf in value_fields:
                    arcpy.DeleteField_management(t, vf.name)

            arcpy.AlterField_management(t, 'COUNT', 'CELL_COUNT')
            drop_fields = ['ZONE_CODE', 'COUNT', 'AREA']
            if not debug_mode:
                for df in drop_fields:
                    try:
                        arcpy.DeleteField_management(t, df)
                    except arcpy.ExecuteError:
                        continue

        # Set up environments for alignment between zone raster and theme raster
        if isinstance(zone_fc, arcpy.Result):
            zone_fc = zone_fc.getOutput(0)
        this_files_dir = os.path.dirname(os.path.abspath(__file__))
        os.chdir(this_files_dir)
        common_grid = os.path.abspath('../common_grid.tif')
        env.snapRaster = common_grid
        env.cellSize = common_grid
        env.extent = zone_fc

        zone_desc = arcpy.Describe(zone_fc)
        zone_raster = 'convertraster'
        if zone_desc.dataType not in ['RasterDataset', 'RasterLayer']:
            zone_raster = arcpy.PolygonToRaster_conversion(
                zone_fc,
                zone_field,
                zone_raster,
                'CELL_CENTER',
                cellsize=env.cellSize)
            print('cell size is {}'.format(env.cellSize))
            zone_size = int(env.cellSize)
        else:
            zone_raster = zone_fc
            zone_size = min(
                arcpy.Describe(zone_raster).meanCellHeight,
                arcpy.Describe(zone_raster).meanCellWidth)
            raster_size = min(
                arcpy.Describe(in_value_raster).meanCellHeight,
                arcpy.Describe(in_value_raster).meanCellWidth)
            env.cellSize = min([zone_size, raster_size])
            print('cell size is {}'.format(env.cellSize))

        # I tested and there is no need to resample the raster being summarized. It will be resampled correctly
        # internally in the following tool given that the necessary environments are set above (cell size, snap).
        # # in_value_raster = arcpy.Resample_management(in_value_raster, 'in_value_raster_resampled', CELL_SIZE)
        if not is_thematic:
            arcpy.AddMessage("Calculating Zonal Statistics...")
            temp_entire_table = arcpy.sa.ZonalStatisticsAsTable(
                zone_raster, zone_field, in_value_raster, 'temp_zonal_table',
                'DATA', 'MEAN')

        if is_thematic:
            # for some reason env.cellSize doesn't work
            # calculate/doit
            arcpy.AddMessage("Tabulating areas...")
            temp_entire_table = arcpy.sa.TabulateArea(
                zone_raster,
                zone_field,
                in_value_raster,
                'Value',
                'temp_area_table',
                processing_cell_size=env.cellSize)
            # TabulateArea capitalizes the zone for some annoying reason and ArcGIS is case-insensitive to field names
            # so we have this work-around:
            zone_field_t = '{}_t'.format(zone_field)
            DM.AddField(temp_entire_table,
                        zone_field_t,
                        'TEXT',
                        field_length=20)
            expr = '!{}!'.format(zone_field.upper())
            DM.CalculateField(temp_entire_table, zone_field_t, expr, 'PYTHON')
            DM.DeleteField(temp_entire_table, zone_field.upper())
            DM.AlterField(temp_entire_table,
                          zone_field_t,
                          zone_field,
                          clear_field_alias=True)

            # replaces join to Zonal Stats in previous versions of tool
            # no joining, just calculate the area/count from what's produced by TabulateArea
            arcpy.AddField_management(temp_entire_table, 'AREA', 'DOUBLE')
            arcpy.AddField_management(temp_entire_table, 'COUNT', 'DOUBLE')

            cursor_fields = ['AREA', 'COUNT']
            value_fields = [
                f.name for f in arcpy.ListFields(temp_entire_table, 'VALUE*')
            ]
            cursor_fields.extend(value_fields)
            with arcpy.da.UpdateCursor(temp_entire_table,
                                       cursor_fields) as uCursor:
                for uRow in uCursor:
                    area, count, value_fields = uRow[0], uRow[1], uRow[2:]
                    area = sum(value_fields)
                    count = round(
                        area / (int(env.cellSize) * int(env.cellSize)), 0)
                    new_row = [area, count] + value_fields
                    uCursor.updateRow(new_row)

        arcpy.AddMessage("Refining output table...")

        arcpy.AddField_management(temp_entire_table, 'datacoveragepct',
                                  'DOUBLE')
        arcpy.AddField_management(temp_entire_table, 'ORIGINAL_COUNT', 'LONG')

        # calculate datacoveragepct by comparing to original areas in zone raster
        # alternative to using JoinField, which is prohibitively slow if zones exceed hu12 count
        zone_raster_dict = {
            row[0]: row[1]
            for row in arcpy.da.SearchCursor(zone_raster,
                                             [zone_field, 'Count'])
        }
        temp_entire_table_dict = {
            row[0]: row[1]
            for row in arcpy.da.SearchCursor(temp_entire_table,
                                             [zone_field, 'COUNT'])
        }

        sum_cell_area = float(env.cellSize) * float(env.cellSize)
        orig_cell_area = zone_size * zone_size

        with arcpy.da.UpdateCursor(
                temp_entire_table,
            [zone_field, 'datacoveragepct', 'ORIGINAL_COUNT']) as cursor:
            for uRow in cursor:
                key_value, data_pct, count_orig = uRow
                count_orig = zone_raster_dict[key_value]
                if key_value in temp_entire_table_dict:
                    count_summarized = temp_entire_table_dict[key_value]
                    data_pct = 100 * float((count_summarized * sum_cell_area) /
                                           (count_orig * orig_cell_area))
                else:
                    data_pct = None
                cursor.updateRow((key_value, data_pct, count_orig))

        # Refine the output
        refine_zonal_output(temp_entire_table)

        # in order to add vector capabilities back, need to do something with this
        # right now we just can't fill in polygon zones that didn't convert to raster in our system
        stats_result = cu.one_in_one_out(temp_entire_table, zone_fc,
                                         zone_field, out_table)

        # Convert "datacoveragepct" and "ORIGINAL_COUNT" values to 0 for zones with no metrics calculated
        with arcpy.da.UpdateCursor(
                out_table,
            [zone_field, 'datacoveragepct', 'ORIGINAL_COUNT', 'CELL_COUNT'
             ]) as u_cursor:
            for row in u_cursor:
                # data_coverage pct to 0
                if row[1] is None:
                    row[1] = 0
                # original count filled in if a) zone outside raster bounds or b) zone too small to be rasterized
                if row[2] is None:
                    if row[0] in zone_raster_dict:
                        row[2] = zone_raster_dict[row[0]]
                    else:
                        row[2] = 0
                # cell count set to 0
                if row[3] is None:
                    row[3] = 0
                u_cursor.updateRow(row)

        # count whether all zones got an output record or not
        out_count = int(
            arcpy.GetCount_management(temp_entire_table).getOutput(0))
        in_count = int(arcpy.GetCount_management(zone_fc).getOutput(0))
        count_diff = in_count - out_count

        # cleanup
        if not debug_mode:
            for item in [
                    'temp_zonal_table', temp_entire_table, 'convertraster'
            ]:  # don't add zone_raster, orig
                arcpy.Delete_management(item)
        arcpy.ResetEnvironments()
        env.workspace = orig_env  # hope this prevents problems using list of FCs from workspace as batch
        arcpy.CheckInExtension("Spatial")

        return [stats_result, count_diff]

    def unflatten(intermediate_table):
        flat_zoneid = zone_field
        unflat_zoneid = zone_field.replace('flat', '')
        zone_type = [f.type for f in arcpy.ListFields(zone_fc, flat_zoneid)][0]
        # Set up the output table (can't do this until the prior tool is run)
        # if os.path.dirname(out_table):
        #     out_path = os.path.dirname(out_table)
        # else:
        #     out_path = orig_env

        unflat_result = DM.CreateTable('in_memory',
                                       os.path.basename(out_table))

        # get the fields to add to the table
        editable_fields = [
            f for f in arcpy.ListFields(intermediate_table)
            if f.editable and f.name.lower() != flat_zoneid.lower()
        ]

        # populate the new table schema
        DM.AddField(unflat_result, unflat_zoneid, zone_type)
        for f in editable_fields:
            DM.AddField(unflat_result, f.name, f.type, field_length=f.length)

        # map original zone ids to new zone ids
        original_flat = defaultdict(list)
        with arcpy.da.SearchCursor(unflat_table,
                                   [unflat_zoneid, flat_zoneid]) as cursor:
            for row in cursor:
                if row[1] not in original_flat[row[0]]:
                    original_flat[row[0]].append(row[1])

        # Use CELL_COUNT as weight for means to calculate final values for each zone.
        fixed_fields = [
            unflat_zoneid, 'ORIGINAL_COUNT', 'CELL_COUNT', 'datacoveragepct'
        ]
        other_field_names = [
            f.name for f in editable_fields if f.name not in fixed_fields
        ]
        i_cursor = arcpy.da.InsertCursor(
            unflat_result,
            fixed_fields + other_field_names)  # open output table cursor
        flat_stats = {
            r[0]: r[1:]
            for r in arcpy.da.SearchCursor(intermediate_table, [
                flat_zoneid, 'ORIGINAL_COUNT', 'CELL_COUNT', 'datacoveragepct'
            ] + other_field_names)
        }

        count_diff = 0
        for zid, unflat_ids in original_flat.items():
            # skip flat polys that were not rasterized
            valid_unflat_ids = [uid for uid in unflat_ids if uid in flat_stats]
            # ORIGINAL_COUNT was put at index 0 earlier
            area_vec = [flat_stats[uid][0] for uid in valid_unflat_ids]
            cell_vec = [flat_stats[uid][1] for uid in valid_unflat_ids]
            # datacoveragepct gets special handling
            coverage_vec = [flat_stats[uid][2] for uid in valid_unflat_ids]
            # "the rest": one list of stat values per id
            stat_vectors_by_id = [flat_stats[uid][3:] for uid in valid_unflat_ids]

            # calc the new summarized values
            original_count = sum(
                filter(None, area_vec)
            )  # None area is functionally equivalent to 0, all Nones = 0 too
            cell_count = sum(filter(None, cell_vec))
            if cell_count > 0:
                weighted_coverage = sum(
                    [a * b
                     for a, b in zip(area_vec, coverage_vec)]) / original_count

                # this calculation accounts for fractional missing values, both kinds (whole zone is no data, or zone
                # was missing some data and had data coverage % < 100). This is done by converting None to 0
                # and by using the cell_count (count of cells with data present)
                # instead of the full zone original_count. You have to do both or the mean will be distorted.
                # hand-verification that this works as intended using test GIS data on was completed 2019-11-01 by NJS
                crossprods = []
                for i in range(0, len(valid_unflat_ids)):
                    crossprods.append([
                        cell_vec[i] * float(s or 0)
                        for s in stat_vectors_by_id[i]
                    ])

                weighted_stat_means = []
                # each zip(*crossprods) tuple is one stat field's column; iterate
                # directly, since a zip object is not subscriptable in Python 3
                for stat_column in zip(*crossprods):
                    weighted_stat_means.append(sum(stat_column) / cell_count)
            else:
                weighted_coverage = 0
                weighted_stat_means = [None] * len(other_field_names)
                count_diff += 1

            new_row = [zid, original_count, cell_count, weighted_coverage
                       ] + weighted_stat_means
            i_cursor.insertRow(new_row)
        del i_cursor

        DM.Delete(intermediate_table)

        return [unflat_result, count_diff]

    def rename_to_standard(table):
        arcpy.AddMessage("Renaming.")
        # datacoverage just gets tag
        new_datacov_name = '{}_datacoveragepct'.format(rename_tag)
        cu.rename_field(table,
                        'datacoveragepct',
                        new_datacov_name,
                        deleteOld=True)
        # DM.AlterField(out_table, 'datacoveragepct', new_datacov_name, clear_field_alias=True)
        if not is_thematic:
            new_mean_name = '{}_{}'.format(rename_tag, units).rstrip(
                '_')  # if no units, just rename_tag
            cu.rename_field(table, 'MEAN', new_mean_name, deleteOld=True)
            # DM.AlterField(out_table, 'MEAN', new_mean_name, clear_field_alias=True)
        else:
            # look up the values based on the rename tag
            geo_file = os.path.abspath('../geo_metric_provenance.csv')
            with open(geo_file) as csv_file:
                reader = csv.DictReader(csv_file)
                mapping = {
                    row['subgroup_original_code']: row['subgroup']
                    for row in reader if row['main_feature']
                    and row['main_feature'] in rename_tag
                }
                print(mapping)

            # update them
            for old, new in mapping.items():
                old_fname = 'VALUE_{}_pct'.format(old)
                new_fname = '{}_{}_pct'.format(rename_tag, new)
                if arcpy.ListFields(table, old_fname):
                    try:
                        # same problem with AlterField limit of 31 characters here.
                        DM.AlterField(table,
                                      old_fname,
                                      new_fname,
                                      clear_field_alias=True)
                    except:
                        cu.rename_field(table,
                                        old_fname,
                                        new_fname,
                                        deleteOld=True)
        return table

    if unflat_table:
        if not arcpy.Exists(unflat_table):
            raise Exception('Unflat_table must exist.')
        intermediate_stats = stats_area_table(out_table='intermediate_stats')
        named_as_original = unflatten(intermediate_stats[0])
    else:
        named_as_original = stats_area_table(out_table='named_as_original')

    if rename_tag:
        named_as_standard = rename_to_standard(named_as_original[0])
        out_table = DM.CopyRows(named_as_standard, out_table)
    else:
        out_table = DM.CopyRows(named_as_original[0], out_table)

    total_count_diff = named_as_original[1]

    if total_count_diff > 0:
        warn_msg = (
            "WARNING: {0} zones have null zonal statistics. There are 2 possible reasons:\n"
            "1) Presence of zones that are fully outside the extent of the raster summarized.\n"
            "2) Zones are too small relative to the raster resolution.".format(
                total_count_diff))
        arcpy.AddWarning(warn_msg)

    arcpy.SetLogHistory(True)

    return out_table