Code Example #1
File: LineFrequency.py  Project: LEONOB2014/NetworkGT
import os
import sys

import arcpy
import networkx as nx  # the nx.Graph / dijkstra calls below need networkx


def main(infc, sampling, mask, outfc, trim):

    try:

        if outfc.lower().endswith('.shp'):
            arcpy.AddError('Output parameter must be saved in a geodatabase')
            sys.exit()

        dname = os.path.dirname(outfc)
        fname = os.path.basename(outfc)

        if mask:

            arcpy.Intersect_analysis([sampling, mask], "in_memory\\lines",
                                     "ONLY_FID", "", "")
            arcpy.MultipartToSinglepart_management("in_memory\\lines",
                                                   "in_memory\\lines_sp")
            sampling = "in_memory\\lines_sp"

            dfields = []
            for field in arcpy.ListFields(sampling):
                if not field.required:
                    dfields.append(field.name)

            arcpy.DeleteField_management(sampling, dfields)

        curfields = [f.name for f in arcpy.ListFields(sampling)]

        if 'Sample_No_' not in curfields:
            arcpy.AddField_management(sampling, 'Sample_No_', 'DOUBLE')

        with arcpy.da.UpdateCursor(sampling, ['OID@', 'Sample_No_']) as cursor:
            for feature in cursor:
                try:
                    feature[1] = feature[0]
                    cursor.updateRow(feature)

                except Exception as e:  # no connection?
                    arcpy.AddError('%s' % e)
                    continue

        del cursor, feature

        arcpy.FeatureVerticesToPoints_management(sampling, "in_memory\\start",
                                                 "START")
        sources = {}
        with arcpy.da.SearchCursor("in_memory\\start",
                                   ['SHAPE@', 'Sample_No_']) as cursor:
            for feature in cursor:
                start = feature[0].firstPoint
                start = (round(start.X, 4), round(start.Y, 4))
                sources[feature[1]] = start

        del cursor, feature

        infields = [(f.name, f.type) for f in arcpy.ListFields(infc)]

        arcpy.Intersect_analysis([sampling, infc], "in_memory\\int", "", "",
                                 "POINT")

        arcpy.SplitLineAtPoint_management(sampling, "in_memory\\int", outfc, 1)

        curfields = [f.name for f in arcpy.ListFields(outfc)]

        if 'Distance' not in curfields:
            arcpy.AddField_management(outfc, 'Distance', "DOUBLE")

        if 'Count' not in curfields:
            arcpy.AddField_management(outfc, 'Count', "SHORT")

        edges = {}
        points = []
        arcpy.CreateFeatureclass_management("in_memory", "point", "POINT", '',
                                            'ENABLED', '', infc)

        arcpy.AddMessage('Calculating Edges')
        with arcpy.da.InsertCursor("in_memory\\point", ["SHAPE@"]) as cursor2:
            with arcpy.da.SearchCursor(outfc,
                                       ['SHAPE@', 'Sample_No_']) as cursor:

                for feature in cursor:
                    start = feature[0].firstPoint
                    end = feature[0].lastPoint
                    pnts1, pnts2 = [(round(start.X, 4), round(start.Y, 4)),
                                    (round(end.X, 4), round(end.Y, 4))]
                    Length = feature[0].length
                    ID = feature[1]

                    if ID in edges:
                        edges[ID].add_edge(pnts1, pnts2, weight=Length)
                    else:
                        G = nx.Graph()
                        G.add_edge(pnts1, pnts2, weight=Length)
                        edges[ID] = G

                    if pnts1 not in points:
                        points.append(pnts1)
                        cursor2.insertRow([pnts1])

                    if pnts2 not in points:
                        points.append(pnts2)
                        cursor2.insertRow([pnts2])

        del cursor, cursor2, feature, G

        arcpy.SpatialJoin_analysis("in_memory\\point", infc, "in_memory\\sj",
                                   "", "KEEP_COMMON")

        data = {}

        fields = []
        for field, ftype in infields:
            if field != 'SHAPE@' and ftype != 'OID' and field not in curfields:
                arcpy.AddField_management(outfc, field, ftype)
                fields.append(field)

        fields.append('SHAPE@')
        with arcpy.da.SearchCursor("in_memory\\sj", fields) as cursor:
            for feature in cursor:
                d = {}
                start = feature[-1].firstPoint
                start = (round(start.X, 4), round(start.Y, 4))
                for enum, field in enumerate(fields[:-1]):
                    d[field] = feature[enum]
                data[start] = d
        del cursor, feature

        Lengths = {}

        fields.extend(['Distance', 'Sample_No_', 'Count'])

        arcpy.AddMessage('Updating Features')
        with arcpy.da.UpdateCursor(outfc, fields) as cursor:
            for feature in cursor:
                try:

                    start = feature[-4].firstPoint
                    end = feature[-4].lastPoint
                    startx, starty = (round(start.X, 4), round(start.Y, 4))
                    endx, endy = (round(end.X, 4), round(end.Y, 4))

                    ID = feature[-2]

                    if ID not in Lengths:
                        G = edges[ID]
                        Source = sources[ID]

                        # one shortest-path pass per sample line is enough; the
                        # original ran the identical call twice and never used the copy
                        Length, Path = nx.single_source_dijkstra(
                            G, Source, weight='weight')
                        Lengths[ID] = [Length]
                        G.clear()

                    L = [
                        Lengths[ID][0][(endx, endy)],
                        Lengths[ID][0][(startx, starty)]
                    ]

                    feature[-3] = max(L)
                    feature[-1] = 1

                    v = L.index(max(L))

                    if v == 1:
                        FID = (startx, starty)
                    else:
                        FID = (endx, endy)

                    if FID in data:
                        d = data[FID]
                        for enum, field in enumerate(fields[:-4]):
                            if field in d:
                                feature[enum] = d[field]

                    cursor.updateRow(feature)
                except Exception as e:  # no connection?
                    arcpy.AddError('%s' % e)
                    break

    except Exception as e:
        # closes the function-level try opened at the top of main()
        arcpy.AddError('%s' % e)
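A sketch of how this tool function might be wired to script-tool parameters; the parameter order here is an assumption, not something the snippet confirms:

# Hypothetical script-tool entry point; the parameter order is assumed.
if __name__ == '__main__':
    main(arcpy.GetParameterAsText(0),   # infc: input fracture network
         arcpy.GetParameterAsText(1),   # sampling: sample line grid
         arcpy.GetParameterAsText(2),   # mask: optional boundary polygon
         arcpy.GetParameterAsText(3),   # outfc: output (must be in a geodatabase)
         arcpy.GetParameterAsText(4))   # trim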
Code Example #2
def generatePointsFromFeatures(inputFC, descInput, zerodate=False):
    def attHelper(row):
        # helper function to get/set field attributes for output gpx file

        pnt = row[1].getPart()
        valuesDict["PNTX"] = str(pnt.X)
        valuesDict["PNTY"] = str(pnt.Y)

        Z = pnt.Z if descInput.hasZ else None
        if Z or ("ELEVATION" in cursorFields):
            valuesDict["ELEVATION"] = str(Z) if Z else str(
                row[fieldNameDict["ELEVATION"]])
        else:
            valuesDict["ELEVATION"] = str(0)

        valuesDict["NAME"] = row[
            fieldNameDict["NAME"]] if "NAME" in fields else " "
        valuesDict["DESCRIPT"] = row[
            fieldNameDict["DESCRIPT"]] if "DESCRIPT" in fields else " "

        if "DATETIMES" in fields:
            row_time = row[fieldNameDict["DATETIMES"]]
            formatted_time = row_time if row_time else " "
        elif zerodate:
            # no date field: use the epoch as a placeholder timestamp
            formatted_time = time.strftime("%Y-%m-%dT%H:%M:%SZ",
                                           time.gmtime(0))
        else:
            formatted_time = " "

        valuesDict["DATETIMES"] = formatted_time

        return

    #-------------end helper function-----------------

    def getValuesFromFC(inputFC, cursorFields):

        previousPartNum = 0
        startTrack = True

        # Loop through all features and parts
        with arcpy.da.SearchCursor(inputFC,
                                   cursorFields,
                                   spatial_reference=arcpy.SpatialReference(4326),
                                   explode_to_points=True) as searchCur:
            for row in searchCur:
                if descInput.shapeType == "Polyline":
                    for part in row:
                        try:
                            newPart = False
                            if row[0] != previousPartNum or startTrack:
                                startTrack = False
                                newPart = True
                            previousPartNum = row[0]

                            attHelper(row)
                            yield "trk", newPart
                        except:
                            arcpy.AddWarning(
                                "Problem reading values for row: {}. Skipping."
                                .format(row[0]))

                elif descInput.shapeType in ("Multipoint", "Point"):
                    # check whether the data was originally GPX with a "Type" of "TRKPT" or "WPT"
                    trkType = row[fieldNameDict["TYPE"]].upper() if "TYPE" in fields else None
                    try:
                        attHelper(row)

                        if trkType == "TRKPT":
                            newPart = False
                            if previousPartNum == 0:
                                newPart = True
                                previousPartNum = 1

                            yield "trk", newPart

                        else:
                            yield "wpt", None
                    except:
                        arcpy.AddWarning(
                            "Problem reading values for row: {}. Skipping.".
                            format(row[0]))

    # ---------end get values function-------------

    # Get list of available fields
    fields = [f.name.upper() for f in arcpy.ListFields(inputFC)]
    valuesDict = {
        "ELEVATION": 0,
        "NAME": "",
        "DESCRIPT": "",
        "DATETIMES": "",
        "TYPE": "",
        "PNTX": 0,
        "PNTY": 0
    }
    fieldNameDict = {
        "ELEVATION": 0,
        "NAME": 1,
        "DESCRIPT": 2,
        "DATETIMES": 3,
        "TYPE": 4,
        "PNTX": 5,
        "PNTY": 6
    }

    cursorFields = ["OID@", "SHAPE@"]

    for key, item in valuesDict.items():
        if key in fields:
            fieldNameDict[key] = len(cursorFields)  # assign current index
            cursorFields.append(key)  # build up list of fields for cursor
        else:
            fieldNameDict[key] = None

    for index, gpxValues in enumerate(getValuesFromFC(inputFC, cursorFields)):

        if gpxValues[0] == "wpt":
            wpt = ET.SubElement(gpx, 'wpt', {
                'lon': valuesDict["PNTX"],
                'lat': valuesDict["PNTY"]
            })
            wptEle = ET.SubElement(wpt, "ele")
            wptEle.text = valuesDict["ELEVATION"]
            wptTime = ET.SubElement(wpt, "time")
            wptTime.text = valuesDict["DATETIMES"]
            wptName = ET.SubElement(wpt, "name")
            wptName.text = valuesDict["NAME"]
            wptDesc = ET.SubElement(wpt, "desc")
            wptDesc.text = valuesDict["DESCRIPT"]

        else:  #TRKS
            if gpxValues[1]:
                # Elements for the start of a new track
                trk = ET.SubElement(gpx, "trk")
                trkName = ET.SubElement(trk, "name")
                trkName.text = valuesDict["NAME"]
                trkDesc = ET.SubElement(trk, "desc")
                trkDesc.text = valuesDict["DESCRIPT"]
                trkSeg = ET.SubElement(trk, "trkseg")

            trkPt = ET.SubElement(trkSeg, "trkpt", {
                'lon': valuesDict["PNTX"],
                'lat': valuesDict["PNTY"]
            })
            trkPtEle = ET.SubElement(trkPt, "ele")
            trkPtEle.text = valuesDict["ELEVATION"]
            trkPtTime = ET.SubElement(trkPt, "time")
            trkPtTime.text = valuesDict["DATETIMES"]
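The function writes into a module-level gpx element that the snippet doesn't show; a minimal sketch of that surrounding setup, with the namespace attributes being assumptions:

# Assumed module-level setup for the fragment above.
import time
import arcpy
import xml.etree.ElementTree as ET

gpx = ET.Element('gpx', {'xmlns': 'http://www.topografix.com/GPX/1/1',
                         'version': '1.1', 'creator': 'ArcGIS'})
# ... call generatePointsFromFeatures(...), then write the tree out:
# ET.ElementTree(gpx).write('output.gpx')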
Code Example #3
File: canopy.py  Project: ztpilgrim/canopy
def assign_phyregs_to_naipqq():
    '''
    This function adds the phyregs field to the NAIP QQ shapefile and populates
    it with the IDs of the physiographic regions that intersect each NAIP tile.
    It needs to be run only once; re-running it is harmless but wastes
    computation.
    '''
    phyregs_layer = canopy_config.phyregs_layer
    phyregs_area_sqkm_field = canopy_config.phyregs_area_sqkm_field
    naipqq_layer = canopy_config.naipqq_layer
    naipqq_phyregs_field = canopy_config.naipqq_phyregs_field

    # calculate phyregs_area_sqkm_field
    fields = arcpy.ListFields(phyregs_layer, phyregs_area_sqkm_field)
    for field in fields:
        if field.name == phyregs_area_sqkm_field:
            arcpy.DeleteField_management(phyregs_layer, phyregs_area_sqkm_field)
            break
    arcpy.AddField_management(phyregs_layer, phyregs_area_sqkm_field, 'DOUBLE')
    arcpy.management.CalculateGeometryAttributes(phyregs_layer,
            [[phyregs_area_sqkm_field, 'AREA']], '', 'SQUARE_KILOMETERS')

    # calculate naipqq_phyregs_field
    fields = arcpy.ListFields(naipqq_layer, naipqq_phyregs_field)
    for field in fields:
        if field.name == naipqq_phyregs_field:
            arcpy.DeleteField_management(naipqq_layer, naipqq_phyregs_field)
            break
    arcpy.AddField_management(naipqq_layer, naipqq_phyregs_field, 'TEXT',
            field_length=100)

    # make sure to clear selection because most geoprocessing tools use
    # selected features, if any
    arcpy.SelectLayerByAttribute_management(phyregs_layer, 'CLEAR_SELECTION')
    arcpy.SelectLayerByAttribute_management(naipqq_layer, 'CLEAR_SELECTION')

    # initialize the phyregs field to "," so region IDs can be appended
    arcpy.CalculateField_management(naipqq_layer, naipqq_phyregs_field, '","')

    # for each physiographic region
    with arcpy.da.SearchCursor(phyregs_layer, ['NAME', 'PHYSIO_ID']) as cur:
        for row in sorted(cur):
            name = row[0]
            print(name)
            phyreg_id = row[1]
            # select the current physiographic region
            arcpy.SelectLayerByAttribute_management(phyregs_layer,
                    where_clause='PHYSIO_ID=%d' % phyreg_id)
            # select intersecting naip qq features
            arcpy.SelectLayerByLocation_management(naipqq_layer,
                    select_features=phyregs_layer)
            # append phyreg_id + , so the result becomes ,...,#,
            arcpy.CalculateField_management(naipqq_layer, naipqq_phyregs_field,
                    '!%s!+"%d,"' % (naipqq_phyregs_field, phyreg_id),
                    'PYTHON_9.3')

    # clear selection again
    arcpy.SelectLayerByAttribute_management(phyregs_layer, 'CLEAR_SELECTION')
    arcpy.SelectLayerByAttribute_management(naipqq_layer, 'CLEAR_SELECTION')

    print('Completed')
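Because each tile's field ends up as ",id1,id2,...,", membership can be queried with LIKE; a sketch, where the layer and field names come from canopy_config and region ID 5 is a placeholder:

# Sketch: select the NAIP QQ tiles assigned to physiographic region 5.
import canopy_config
arcpy.SelectLayerByAttribute_management(
    canopy_config.naipqq_layer,
    where_clause="%s LIKE '%%,5,%%'" % canopy_config.naipqq_phyregs_field)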
Code Example #4
import os, sys
import arcpy

src = arcpy.GetParameterAsText(0)  # e.g. r"D:\OMM_GIS\Projects\DoP_InterCensus_2019\01_EA\EA_copiled\NPT_EA_116(export).shp"
srcField = arcpy.GetParameterAsText(1)  # field to read attribute values from, e.g. "VTCODE"
outDir = arcpy.GetParameterAsText(2)  # output directory for the new files, e.g. r"C:\TempGIS\DOP\export"
preffix = arcpy.GetParameterAsText(3)  # optional prefix for new file names; if not provided, the field name is used
if not preffix:
	preffix = srcField
preffix = preffix + "_"
#fcList = arcpy.ListFeatureClasses(src)
setAttr = []
row_count = 0

# verify the field type (string or integer) for where-clause construction
fields = arcpy.ListFields(src)
for field in fields:
	if field.name == srcField:
		if field.type == "String":
			where_clause_template = '"{}" = \'{}\''
		elif field.type == "Integer":
			where_clause_template = '"{}" = {}'
		else:
			print ("Field type must be string or integer. \nScript stopped")
			sys.exit()
		break

# fetch the field values and build a unique list
with arcpy.da.SearchCursor(src, [srcField]) as cursor:
	for row in cursor:
		if row[0] not in setAttr:
			setAttr.append(row[0])
		row_count += 1
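The snippet stops after building the list; a sketch of the likely continuation, exporting one shapefile per unique value (Select_analysis and the naming scheme are assumptions):

# Assumed continuation: one export per unique attribute value.
for attr in setAttr:
	where_clause = where_clause_template.format(srcField, attr)
	new_fc = os.path.join(outDir, preffix + str(attr) + ".shp")
	arcpy.Select_analysis(src, new_fc, where_clause)
	print("Exported " + new_fc)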
Code Example #5
## This script checks whether a field exists, and creates it if it does not
##

# Modules
import arcpy, os

# Environment settings
ruta = 'C:\\...'
arcpy.env.workspace = ruta
arcpy.env.overwriteOutput = True

# Local variables
nuevo_campo = 'nombre_campo'
capa = 'nombre_capa.shp'
listaCampos = arcpy.ListFields(capa)
existencia = 0

# Check whether the field exists
for campo in listaCampos:
    if campo.name == nuevo_campo:
        existencia = 1

# If it exists, do nothing
if existencia == 1:
    print('Field ' + nuevo_campo + ' already exists')

# If not, create it
else:
    arcpy.AddField_management(capa, nuevo_campo, tipo...)
    print('Field ' + nuevo_campo + ' has been created')
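A shorter equivalent check passes the field name as a wild card to ListFields; a sketch, with 'TEXT' standing in for the elided field type:

# Equivalent existence check using ListFields' wild_card parameter.
if arcpy.ListFields(capa, nuevo_campo):
    print('Field ' + nuevo_campo + ' already exists')
else:
    arcpy.AddField_management(capa, nuevo_campo, 'TEXT')  # field type assumed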
Code Example #6
import arcpy
import UPD_SL  # project helper: UpToDateShapeLengthField (assumed module name)
import Ext     # project helper: Export (assumed module name)

def SLEM(Line, Distance, Output, TempFolder, TF):
    
    CopyLine = arcpy.CopyFeatures_management(Line, "%ScratchWorkspace%\\CopyLine")
    
    fieldnames = [f.name for f in arcpy.ListFields(CopyLine)]

    #/identify the polyline type: raw, UGOs, sequenced UGOs, or AGOs
    k = 0
    if "Rank_AGO" in fieldnames:
        k = 3
    elif "Order_ID" in fieldnames:
        k = 2
    elif "Rank_UGO" in fieldnames:
        k = 1

    arcpy.AddMessage(k)

    ################################
    ########## Raw polyline ########
    ################################
    #
    if k == 0 :
        
        #/shaping of the segmented result
        arcpy.AddField_management(CopyLine, "Rank_UGO", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "Rank_UGO", "!"+fieldnames[0]+"!", "PYTHON_9.3", "")
        arcpy.AddField_management(CopyLine, "From_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "From_Measure", "0", "PYTHON_9.3", "")
        arcpy.AddField_management(CopyLine, "To_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "To_Measure", "!shape.length!", "PYTHON_9.3", "")
        
        #/conversion in routes
        LineRoutes = arcpy.CreateRoutes_lr(CopyLine, "Rank_UGO", "%ScratchWorkspace%\\LineRoutes", "TWO_FIELDS", "From_Measure", "To_Measure")
        
        #/creation of the event table
        PointEventTEMP = arcpy.CreateTable_management("%ScratchWorkspace%", "PointEventTEMP", "", "")
        arcpy.AddField_management(PointEventTEMP, "Rank_UGO", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Distance", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "To_M", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        
        UPD_SL.UpToDateShapeLengthField(LineRoutes)

        rowslines = arcpy.SearchCursor(LineRoutes)
        rowsevents = arcpy.InsertCursor(PointEventTEMP)
        for line in rowslines:
            tempdistance = float(0)
            while (tempdistance < float(line.Shape_Length)):
                row = rowsevents.newRow()
                row.Rank_UGO = line.Rank_UGO
                row.To_M = tempdistance + float(Distance)
                row.Distance = tempdistance
                rowsevents.insertRow(row)
                tempdistance = tempdistance + float(Distance)
        del rowslines
        del rowsevents

        #/creation of the route event layer
        MakeRouteEventTEMP = arcpy.MakeRouteEventLayer_lr(LineRoutes, "Rank_UGO", PointEventTEMP, "Rank_UGO LINE Distance To_M", "%ScratchWorkspace%\\MakeRouteEventTEMP")
        Split = arcpy.CopyFeatures_management(MakeRouteEventTEMP, "%ScratchWorkspace%\\Split", "", "0", "0", "0")
        Sort = arcpy.Sort_management(Split, Output, [["Rank_UGO", "ASCENDING"], ["Distance", "ASCENDING"]])

        arcpy.DeleteField_management(Sort, "To_M")
        
        #/calculation of the "Distance" field
        UPD_SL.UpToDateShapeLengthField(Sort)
        
        rows1 = arcpy.UpdateCursor(Sort)
        rows2 = arcpy.UpdateCursor(Sort)
        line2 = rows2.next()
        line2.Distance = 0
        rows2.updateRow(line2)
        nrows = int(str(arcpy.GetCount_management(Sort)))
        n = 0
        for line1 in rows1 :
            line2 = rows2.next()          
            if n == nrows-1 :
                break
            if n == 0 :
                line1.Distance = 0
            if line2.Rank_UGO == line1.Rank_UGO :
                line2.Distance = line1.Distance + line1.Shape_Length
                rows2.updateRow(line2)
            if line2.Rank_UGO != line1.Rank_UGO :
                line2.Distance = 0
                rows2.updateRow(line2)
            
            n+=1
        
        #/deleting of the temporary files
        if str(TF) == "true" :
            arcpy.Delete_management(Split)
            arcpy.Delete_management(CopyLine)
            arcpy.Delete_management(LineRoutes)
            arcpy.Delete_management(PointEventTEMP)

    ##################
    ###### UGO #######
    ##################
    if k == 1 :    
        
        #/shaping of the segmented result
        arcpy.AddField_management(CopyLine, "From_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "From_Measure", "0", "PYTHON_9.3", "")
        arcpy.AddField_management(CopyLine, "To_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "To_Measure", "!shape.length!", "PYTHON_9.3", "")
        
        #/conversion in routes
        LineRoutes = arcpy.CreateRoutes_lr(CopyLine, "Rank_UGO", "%ScratchWorkspace%\\LineRoutes", "TWO_FIELDS", "From_Measure", "To_Measure")
        
        #/creation of the event table
        PointEventTEMP = arcpy.CreateTable_management("%ScratchWorkspace%", "PointEventTEMP", "", "")
        arcpy.AddField_management(PointEventTEMP, "Rank_UGO", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Distance", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "To_M", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        
        UPD_SL.UpToDateShapeLengthField(LineRoutes)

        rowslines = arcpy.SearchCursor(LineRoutes)
        rowsevents = arcpy.InsertCursor(PointEventTEMP)
        for line in rowslines:
            tempdistance = float(0)
            while (tempdistance < float(line.Shape_Length)):
                row = rowsevents.newRow()
                row.Rank_UGO = line.Rank_UGO
                row.To_M = tempdistance + float(Distance)
                row.Distance = tempdistance
                rowsevents.insertRow(row)
                tempdistance = tempdistance + float(Distance)
        del rowslines
        del rowsevents
        
        #/creation of the route event layer
        MakeRouteEventTEMP = arcpy.MakeRouteEventLayer_lr(LineRoutes, "Rank_UGO", PointEventTEMP, "Rank_UGO LINE Distance To_M", "%ScratchWorkspace%\\MakeRouteEventTEMP")
        Split = arcpy.CopyFeatures_management(MakeRouteEventTEMP, "%ScratchWorkspace%\\Split", "", "0", "0", "0")
        Sort = arcpy.Sort_management(Split, Output, [["Rank_UGO", "ASCENDING"], ["Distance", "ASCENDING"]])

        arcpy.DeleteField_management(Sort, "To_M")
        
        #/calculation of the "Distance" field
        UPD_SL.UpToDateShapeLengthField(Sort)
        
        rows1 = arcpy.UpdateCursor(Sort)
        rows2 = arcpy.UpdateCursor(Sort)
        line2 = rows2.next()
        line2.Distance = 0
        rows2.updateRow(line2)
        nrows = int(str(arcpy.GetCount_management(Sort)))
        n = 0
        for line1 in rows1 :
            line2 = rows2.next()          
            if n == nrows-1 :
                break
            if n == 0 :
                line1.Distance = 0
            if line2.Rank_UGO == line1.Rank_UGO :
                line2.Distance = line1.Distance + line1.Shape_Length
                rows2.updateRow(line2)
            if line2.Rank_UGO != line1.Rank_UGO :
                line2.Distance = 0
                rows2.updateRow(line2)
            
            n+=1
        
        #/deleting of the temporary files
        if str(TF) == "true" :
            arcpy.Delete_management(Split)
            arcpy.Delete_management(CopyLine)
            arcpy.Delete_management(LineRoutes)
            arcpy.Delete_management(PointEventTEMP)

    ################################
    ######### Sequenced UGO ########
    ################################
    if k == 2 :    
        
        #/shaping of the segmented result
        arcpy.AddField_management(CopyLine, "From_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "From_Measure", "0", "PYTHON_9.3", "")
        arcpy.AddField_management(CopyLine, "To_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "To_Measure", "!Shape_Length!", "PYTHON_9.3", "")
          
        #/conversion in routes
        LineRoutes = arcpy.CreateRoutes_lr(CopyLine, "Rank_UGO", "%ScratchWorkspace%\\LineRoutes", "TWO_FIELDS", "From_Measure", "To_Measure")
        arcpy.AddField_management(LineRoutes, "Order_ID", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        Sort = arcpy.Sort_management(Line, "%ScratchWorkspace%\\Sort", [["Rank_UGO", "ASCENDING"]])

        rows1 = arcpy.UpdateCursor(LineRoutes)
        rows2 = arcpy.SearchCursor(Sort)
        
        for line1 in rows1 :
            line2 = rows2.next()
            line1.Order_ID = line2.Order_ID
            rows1.updateRow(line1)
            
        #/creation of the event table
        PointEventTEMP = arcpy.CreateTable_management("%ScratchWorkspace%", "PointEventTEMP", "", "")
        arcpy.AddField_management(PointEventTEMP, "To_M", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Order_ID", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Rank_UGO", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Distance", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
     
        UPD_SL.UpToDateShapeLengthField(LineRoutes)

        
        rowslines = arcpy.SearchCursor(LineRoutes)
        rowsevents = arcpy.InsertCursor(PointEventTEMP)
        for line in rowslines:
            tempdistance = float(0)
            while (tempdistance < float(line.Shape_Length)):
                row = rowsevents.newRow()
                row.To_M = tempdistance + float(Distance)
                row.Order_ID = line.Order_ID
                row.Rank_UGO = line.Rank_UGO
                row.Distance = tempdistance
                rowsevents.insertRow(row)
                tempdistance = tempdistance + float(Distance)
        del rowslines
        del rowsevents
        
        
        MakeRouteEventTEMP = arcpy.MakeRouteEventLayer_lr(LineRoutes, "Rank_UGO", PointEventTEMP, "Rank_UGO LINE Distance To_M", "%ScratchWorkspace%\\MakeRouteEventTEMP")
        Split = arcpy.CopyFeatures_management(MakeRouteEventTEMP, "%ScratchWorkspace%\\Split", "", "0", "0", "0")
        Sort = arcpy.Sort_management(Split, Output, [["Rank_UGO", "ASCENDING"], ["Distance", "ASCENDING"]])

        arcpy.DeleteField_management(Sort, "To_M")
        
        #/calculation of the "Distance" field
        UPD_SL.UpToDateShapeLengthField(Sort)
        
        rows1 = arcpy.UpdateCursor(Sort)
        rows2 = arcpy.UpdateCursor(Sort)
        line2 = rows2.next()
        line2.Distance = 0
        rows2.updateRow(line2)
        nrows = int(str(arcpy.GetCount_management(Split)))
        n = 0
        for line1 in rows1 :
            line2 = rows2.next()         
            if n >= nrows-1 :
                break
            if n == 0 :
                line1.Distance = 0
            if line2.Rank_UGO == line1.Rank_UGO :
                line2.Distance = line1.Distance + line1.Shape_Length
                rows2.updateRow(line2)
            if line2.Rank_UGO != line1.Rank_UGO :
                line2.Distance = 0
                rows2.updateRow(line2)
            
            n+=1
        #/deleting of the temporary files
        if str(TF) == "true" :
            arcpy.Delete_management(Split)
            arcpy.Delete_management(CopyLine)
            arcpy.Delete_management(LineRoutes)
            arcpy.Delete_management(PointEventTEMP)

    #############
    #### AGO ####
    #############
    if k == 3 :   
        
        #/shaping of the segmented result
        arcpy.AddField_management(CopyLine, "From_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "From_Measure", "0", "PYTHON_9.3", "")
        arcpy.AddField_management(CopyLine, "To_Measure", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        try:
            arcpy.CalculateField_management(CopyLine, "To_Measure", "!shape.length!", "PYTHON_9.3", "")
        except:
            # fall back to the localized shape field name (e.g. "forme" on French installs)
            arcpy.CalculateField_management(CopyLine, "To_Measure", "!forme.length!", "PYTHON_9.3", "")
        
        #/conversion in routes
        LineRoutes = arcpy.CreateRoutes_lr(CopyLine, "Rank_AGO", "%ScratchWorkspace%\\LineRoutes", "TWO_FIELDS", "From_Measure", "To_Measure")
        arcpy.AddField_management(LineRoutes, "Order_ID", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(LineRoutes, "Rank_UGO", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(LineRoutes, "AGO_Val", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        
        UPD_SL.UpToDateShapeLengthField(LineRoutes)
        

        Ext.Export(CopyLine,TempFolder,"ExportTable")       

        fichier = open(TempFolder+"\\ExportTable.txt", 'r')        
        Order_ID = []
        Rank_UGO = []
        Dist = []
        Rank_AGO = []
        AGO_Val = []
        
        head = fichier.readline().split('\n')[0].split(';')
        iOrder_ID = head.index("Order_ID")
        iRank_UGO = head.index("Rank_UGO")
        iRank_AGO = head.index("Rank_AGO")
        iAGO_Val = head.index("AGO_Val")
        
        for l in fichier:
            Order_ID.append(int(l.split('\n')[0].split(';')[iOrder_ID]))
            Rank_UGO.append(int(l.split('\n')[0].split(';')[iRank_UGO]))
            Rank_AGO.append(float(l.split('\n')[0].split(';')[iRank_AGO]))
            AGO_Val.append(float(l.split('\n')[0].split(';')[iAGO_Val].replace(',','.')))

        p=0
        rows1 = arcpy.UpdateCursor(LineRoutes)
        for line1 in rows1 :
            line1.Order_ID = Order_ID[p]
            line1.Rank_UGO = Rank_UGO[p]
            line1.Rank_AGO = Rank_AGO[p]
            line1.AGO_Val = AGO_Val[p]
            rows1.updateRow(line1)
            p+=1
    
        #/creation of the event table
        PointEventTEMP = arcpy.CreateTable_management("%ScratchWorkspace%", "PointEventTEMP", "", "")
        arcpy.AddField_management(PointEventTEMP, "Distance_From_Start", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "To_M", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Order_ID", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Rank_UGO", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Rank_AGO", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "AGO_Val", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")

        
        rowslines = arcpy.SearchCursor(LineRoutes)
        rowsevents = arcpy.InsertCursor(PointEventTEMP)
        for line in rowslines:
            tempdistance = float(0)
            while (tempdistance < float(line.Shape_Length)):
                row = rowsevents.newRow()
                row.Distance_From_Start = tempdistance
                row.To_M = tempdistance + float(Distance)
                row.Order_ID = line.Order_ID
                row.Rank_UGO = line.Rank_UGO
                row.Rank_AGO = line.Rank_AGO
                row.AGO_Val = line.AGO_Val
                rowsevents.insertRow(row)
                tempdistance = tempdistance + float(Distance)
        del rowslines
        del rowsevents
        
        
        MakeRouteEventTEMP = arcpy.MakeRouteEventLayer_lr(LineRoutes, "Rank_AGO", PointEventTEMP, "Rank_AGO LINE Distance_From_Start To_M", "%ScratchWorkspace%\\MakeRouteEventTEMP")
        Split = arcpy.CopyFeatures_management(MakeRouteEventTEMP, "%ScratchWorkspace%\\Split", "", "0", "0", "0")
        arcpy.AddField_management(Split, "Distance", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(Split, "Distance", "!Distance_From_Start!", "PYTHON_9.3", "")
        arcpy.DeleteField_management(Split, ["To_M","Distance_From_Start"])
        Sort = arcpy.Sort_management(Split, Output, [["Order_ID", "ASCENDING"], ["Rank_UGO", "ASCENDING"], ["Rank_AGO", "ASCENDING"], ["Distance", "ASCENDING"]])

        UPD_SL.UpToDateShapeLengthField(Sort)
        
        #/deleting of the temporary files
        if str(TF) == "true" :
            arcpy.Delete_management(Split)
            arcpy.Delete_management(CopyLine)
            arcpy.Delete_management(LineRoutes)
            arcpy.Delete_management(PointEventTEMP)

    return Sort
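A sketch of a call, assuming the scratch workspace is set beforehand; the paths and spacing value are hypothetical:

# Hypothetical call: segment a line layer every 50 m and drop the temp data.
arcpy.env.scratchWorkspace = r"C:\temp\scratch.gdb"
split_fc = SLEM(r"C:\data\streams.shp", 50, r"C:\data\streams_split.shp",
                r"C:\temp", "true")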
Code Example #7
def to_featureclass(geo,
                    location,
                    overwrite=True,
                    validate=False):
    """
    Exports the DataFrame to a Feature class.

    ===============     ====================================================
    **Argument**        **Description**
    ---------------     ----------------------------------------------------
    geo                 Required GeoAccessor. The ``.spatial`` accessor of
                        the source spatially enabled DataFrame.
    ---------------     ----------------------------------------------------
    location            Required string. This is the output location for the
                        feature class. This should be the path and feature
                        class name.
    ---------------     ----------------------------------------------------
    overwrite           Optional Boolean. If overwrite is true, existing
                        data will be deleted and replaced with the spatial
                        dataframe.
    ---------------     ----------------------------------------------------
    validate            Optional Boolean. If true, the export will check if
                        all the geometry objects are correct upon export.
    ===============     ====================================================


    :returns: string (the path of the created feature class)

    """

    out_location = os.path.dirname(location)
    fc_name = os.path.basename(location)
    df = geo._data
    old_idx = df.index
    df.reset_index(drop=True, inplace=True)
    if geo.name is None:
        raise ValueError("DataFrame must have geometry set.")
    if validate and not geo.validate(strict=True):
        raise ValueError(("Mixed geometry types detected, "
                          "cannot export to feature class."))
    if HASARCPY:
        # 1. Create the Save Feature Class
        #
        columns = df.columns.tolist()
        join_dummy = "AEIOUYAJC81Z"
        columns.pop(columns.index(df.spatial.name))
        dtypes = [(join_dummy, np.int64)]
        if overwrite and arcpy.Exists(location):
            arcpy.Delete_management(location)
        elif not overwrite and arcpy.Exists(location):
            raise ValueError(('overwrite set to False, cannot '
                              'overwrite the table.'))

        notnull = geo._data[geo._name].notnull()
        idx = geo._data[geo._name][notnull].first_valid_index()
        sr = geo._data[geo._name][idx]['spatialReference']
        gt = geo._data[geo._name][idx].geometry_type.upper()
        null_geom = {
            'point': pd.io.json.dumps({'x' : None, 'y': None, 'spatialReference' : sr}),
            'polyline' : pd.io.json.dumps({'paths' : [], 'spatialReference' : sr}),
            'polygon' : pd.io.json.dumps({'rings' : [], 'spatialReference' : sr}),
            'multipoint' : pd.io.json.dumps({'points' : [], 'spatialReference' : sr})
        }
        sr = geo._data[geo._name][idx].spatial_reference.as_arcpy
        null_geom = null_geom[gt.lower()]
        fc = arcpy.CreateFeatureclass_management(out_location,
                                                 spatial_reference=sr,
                                                 geometry_type=gt,
                                                 out_name=fc_name,
                                                 )[0]

        # 2. Add the Fields and Data Types
        oidfld = da.Describe(fc)['OIDFieldName']
        for col in columns[:]:
            if col.lower() in ['fid', 'oid', 'objectid']:
                dtypes.append((col, np.int32))
            elif df[col].dtype.name.startswith('datetime64[ns'):
                dtypes.append((col, '<M8[us]'))
            elif df[col].dtype.name == 'object':
                try:
                    u = type(df[col][df[col].first_valid_index()])
                except:
                    u = pd.unique(df[col].apply(type)).tolist()[0]
                if issubclass(u, str):
                    mlen = df[col].str.len().max()
                    dtypes.append((col, '<U%s' % int(mlen)))
                else:
                    try:
                        if df[col][idx] is None:
                            dtypes.append((col, '<U254'))
                        else:
                            dtypes.append((col, type(df[col][idx])))
                    except:
                        dtypes.append((col, '<U254'))
            elif df[col].dtype.name == 'int64':
                dtypes.append((col, np.int64))
            elif df[col].dtype.name == 'bool':
                dtypes.append((col, np.int32))
            else:
                dtypes.append((col, df[col].dtype.type))

        array = np.array([], np.dtype(dtypes))
        arcpy.da.ExtendTable(fc, oidfld, array, join_dummy, append_only=False)

        # 3. Insert the Data
        fields = arcpy.ListFields(fc)
        icols = [fld.name for fld in fields \
                 if fld.type not in ['OID', 'Geometry'] and \
                 fld.name in df.columns] + ['SHAPE@JSON']
        dfcols = [fld.name for fld in fields \
                  if fld.type not in ['OID', 'Geometry'] and\
                  fld.name in df.columns] + [df.spatial.name]

        with da.InsertCursor(fc, icols) as irows:
            dt_fld_idx = [irows.fields.index(col) for col in df.columns \
                          if df[col].dtype.name.startswith('datetime64[ns')]
            def _insert_row(row):
                row[-1] = pd.io.json.dumps(row[-1])
                for idx in dt_fld_idx:
                    if isinstance(row[idx], type(pd.NaT)):
                        row[idx] = None
                irows.insertRow(row)
            q = df[geo._name].isna()
            df.loc[q, 'SHAPE'] = null_geom # set null values to proper JSON
            np.apply_along_axis(_insert_row, 1, df[dfcols].values)
            df.loc[q, 'SHAPE'] = None # reset null values
        df.set_index(old_idx, inplace=True)  # restore the caller's index
        return fc
    elif HASPYSHP:
        if not fc_name.endswith('.shp'):
            fc_name = "%s.shp" % fc_name
        if SHPVERSION < [2]:
            res = _pyshp_to_shapefile(df=df,
                            out_path=out_location,
                            out_name=fc_name)
            df.set_index(old_idx, inplace=True)
            return res
        else:
            res = _pyshp2(df=df,
                          out_path=out_location,
                          out_name=fc_name)
            df.set_index(old_idx, inplace=True)
            return res
    elif HASARCPY == False and HASPYSHP == False:
        raise Exception(("Cannot Export the data without ArcPy or PyShp modules."
                        " Please install them and try again."))
    else:
        df.set_index(old_idx, inplace=True)
        return None
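If I read the source right, this is the helper behind the spatially enabled DataFrame's export in the ArcGIS API for Python; a sketch of the public call path, with hypothetical paths:

# Hypothetical round trip through the public API.
import pandas as pd
from arcgis.features import GeoAccessor, GeoSeriesAccessor  # registers .spatial

sdf = pd.DataFrame.spatial.from_featureclass(r"C:\data\in.gdb\parcels")
sdf.spatial.to_featureclass(location=r"C:\data\out.gdb\parcels_copy")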
Code Example #8
# checks if all shps in a folder have the same field names and order

# ArcPy ListFields documentation
# http://pro.arcgis.com/en/pro-app/arcpy/functions/listfields.htm

import arcpy

# folder path
arcpy.env.workspace = r"PATH"

# take the first shp in the folder and record its field names as the template
for s in arcpy.ListFiles('*.shp'):
    fields = arcpy.ListFields(s)
    f_list = []
    for f in fields:
        f_list.append(f.name)
    break


# loop through the folder, building each shapefile's field-name list and
# checking it against the template; if one doesn't match, break and report it
for s in arcpy.ListFiles('*.shp'):
    print s
    fields = arcpy.ListFields(s)
    f_s_list = []
    for f in fields:
        print f.name
        f_s_list.append(f.name)
    if f_list != f_s_list:
        print s + ' does not match the field names/order of the first shapefile'
        break
Code Example #9
import arcpy, csv

arcpy.CheckOutExtension("Spatial")  # Spatial Analyst is required for ZonalStatisticsAsTable

for i in range(len(raster_files)):
    in_value_raster = raster_files[i]
    raster_name = raster_names[i]
    tmp_table = results_dir + area_type + " " + raster_name + " zone stats.dbf"
    out_table = results_dir + area_type + " " + raster_name + " zone stats.csv"
    arcpy.sa.ZonalStatisticsAsTable(shapefile_file, shapefile_col, in_value_raster, tmp_table)

    table = tmp_table
    outfile = out_table

    #--first lets make a list of all of the fields in the table
    fields = arcpy.ListFields(table)
    field_names = [field.name for field in fields]

    with open(outfile,'wb') as f:
        w = csv.writer(f)
        #--write all field names to the output file
        w.writerow(field_names)

        #--now we make the search cursor that will iterate through the rows of the table
        for row in arcpy.SearchCursor(table):
            field_vals = [row.getValue(field.name) for field in fields]
            w.writerow(field_vals)
        del row
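On 10.1+ the same export is usually written with the arcpy.da cursor, which yields plain tuples and is faster; a sketch under that assumption (note the geometry column then comes back as an (x, y) tuple rather than a geometry object):

# Sketch: the same CSV export via arcpy.da.SearchCursor.
with open(outfile, 'wb') as f:
    w = csv.writer(f)
    w.writerow(field_names)
    with arcpy.da.SearchCursor(table, field_names) as cursor:
        for row in cursor:
            w.writerow(row)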
Code Example #10
                if flow is not None and flow != ' ':
                    time += flow

    # return the total time for the path and the path
    return [time, mergePath]


try:
    # Name of geometric network
    sdnNet = workspace + '\\SDN\\STORMDRAINNET_NET'

    # Feature class to be used as flags in tracing the geometric network
    flags = 'CatchBasin'

    # Add flow time field to catch basins if not already present
    fields = [f.name for f in arcpy.ListFields(flags)]

    newField = ['FlowTime']
    for field in newField:
        if field not in fields:
            arcpy.AddField_management(flags, field, 'FLOAT')
            print field + ' added to ' + flags + '...'

    # Start an edit session. Must provide the workspace.
    edit = arcpy.da.Editor(workspace)

    # Edit session is started without an undo/redo stack for versioned data
    # (for second argument, use False for unversioned data)
    # For fgdbs, use settings below.
    edit.startEditing(False, False)
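    # Assumed continuation (sketch): the snippet ends here, but an arcpy.da.Editor
    # session is normally finished with stopOperation/stopEditing once edits are done.
    edit.startOperation()
    # ... FlowTime updates via an arcpy.da.UpdateCursor would go here ...
    edit.stopOperation()
    edit.stopEditing(True)  # True = save the edits

except arcpy.ExecuteError:
    # assumed handler closing the try block opened above
    print arcpy.GetMessages(2)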
Code Example #11
        print
        print "*********************:  SUMMARIZATION  :*********************"
        print
        # Create variables for the input and output feature classes
        #inBld1 = ("C:/LiDAR/Base_map/"+str(cityq)+str("_2d_buildings")+str(".shp"))
        import arcpy
        outFolderP2 = str(dPath) + str(cityq) + "/Summary"
        outName2 = "summary.gdb"
        arcpy.CreateFileGDB_management(outFolderP2, outName2)        
        inBld1 = inMaskData
        inBld2 = arcpy.MakeFeatureLayer_management(inBld1, "bldg_lyr")
        inBld3 = (str(dPath)+str(cityq)+"/Summary/summary.gdb/bldg") 
        arcpy.CopyFeatures_management(inBld2,inBld3)
        dField = ["id", "AREA_M2", "AVGHT_M", "MINHT_M", "MAXHT_M", "BASE_M", "LEN", "WID", "ORIENT8"]
        # drop any of the summary fields that already exist on the copy
        for f in arcpy.ListFields(inBld3):
                if f.name in dField:
                        arcpy.DeleteField_management(inBld3, f.name)

        #print "Processed step 35,  Minimum boundary Geometry created for city:" +str(cityq)
        print "Processed step 35, copied Feature Class for Minimum boundary Geometry creation"
        #inBld = (str(dPath)+str(cityq)+"/Combine/"+str("bldg")+str(".shp"))                                     ######## needs to creat for every city########################
        outmbg1 = (str(dPath)+str(cityq)+"/Summary/summary.gdb/minboundgeom")                                     
        # Use MinimumBoundingGeometry function to get a convex hull area
        #for each cluster of trees which are multipoint features
        arcpy.MinimumBoundingGeometry_management(inBld3, outmbg1, "CONVEX_HULL", "ALL")                           #Output, minboundgeom
Code Example #12
                                          '#', '#', fDef[3], '#',
                                          transDict[fDef[2]])
            else:
                arcpy.AddField_management(thisFC, fDef[0], transDict[fDef[1]],
                                          '#', '#', '#', '#',
                                          transDict[fDef[2]])
            cp2Fields.append(fDef[0])
        except:
            addMsgAndPrint('Failed to add field ' + fDef[0] +
                           ' to feature class ' + featureClass)
            addMsgAndPrint(arcpy.GetMessages(2))

# if labelPoints specified
## add any missing fields to centerPoints2
if arcpy.Exists(labelPoints):
    lpFields = arcpy.ListFields(labelPoints)
    for lpF in lpFields:
        if lpF.name not in cp2Fields:
            addMsgAndPrint(lpF.type)
            if lpF.type in ('Text', 'STRING', 'String'):
                arcpy.AddField_management(centerPoints2, lpF.name, 'TEXT', '#',
                                          '#', lpF.length)
            else:
                if lpF.type in typeTransDict:
                    arcpy.AddField_management(centerPoints2, lpF.name,
                                              typeTransDict[lpF.type])
# append labelPoints to centerPoints2
if arcpy.Exists(labelPoints):
    arcpy.Append_management(labelPoints, centerPoints2, 'NO_TEST')

# if inPolys are to be saved, copy inPolys to savedPolys
Code Example #13
import os
import arcpy
from arcpy import env

class MsgError(Exception):  # assumed custom error type caught below
    pass

def RotateFeatureClass(inputFC, outputFC, angle=0, pivot_point=None):
    """Rotate Feature Class

    inputFC     Input features
    outputFC    Output feature class
    angle       Angle to rotate, in degrees
    pivot_point X,Y coordinates (as space-separated string)
                Default is lower-left of inputFC

    Because the output feature class no longer has "real" xy locations
    after rotation, it has no coordinate system defined.
    """
    def RotateXY(x, y, xc=0, yc=0, angle=0, units="DEGREES"):
        """Rotate an xy coordinate about a specified origin

        x,y      xy coordinates
        xc,yc   center of rotation
        angle   angle
        units    "DEGREES" (default) or "RADIANS"
        """
        import math
        x = x - xc
        y = y - yc
        # make angle clockwise (like Rotate_management)
        angle = angle * -1
        if units == "DEGREES":
            angle = math.radians(angle)
        xr = (x * math.cos(angle)) - (y * math.sin(angle)) + xc
        yr = (x * math.sin(angle)) + (y * math.cos(angle)) + yc
        return xr, yr

    # temp names for cleanup
    env_file = None
    lyrFC, lyrTmp, lyrOut = [None] * 3  # layers
    tmpFC = None  # temp dataset
    Row, Rows, oRow, oRows = [None] * 4  # cursors

    try:
        # process parameters
        try:
            xcen, ycen = [float(xy) for xy in pivot_point.split()]
            pivot_point = xcen, ycen
        except:
            # if pivot point was not specified, get it from
            # the lower-left corner of the feature class
            ext = arcpy.Describe(inputFC).extent
            xcen, ycen = ext.XMin, ext.YMin
            pivot_point = xcen, ycen

        angle = float(angle)

        # set up environment
        env_file = arcpy.CreateScratchName("xxenv", ".xml", "file",
                                           os.environ["TEMP"])
        arcpy.SaveSettings(env_file)

        # Disable any GP environment clips or project on the fly
        arcpy.ClearEnvironment("extent")
        arcpy.ClearEnvironment("outputCoordinateSystem")
        WKS = env.workspace
        if not WKS:
            if os.path.dirname(outputFC):
                WKS = os.path.dirname(outputFC)
            else:
                WKS = os.path.dirname(arcpy.Describe(inputFC).catalogPath)
        env.workspace = env.scratchWorkspace = WKS


        # get feature class properties
        lyrFC = "lyrFC"
        arcpy.MakeFeatureLayer_management(inputFC, lyrFC)
        dFC = arcpy.Describe(lyrFC)
        shpField = dFC.shapeFieldName
        shpType = dFC.shapeType
        FID = dFC.OIDFieldName

        # create temp feature class
        tmpFC = arcpy.CreateScratchName("xxfc", "", "featureclass")
        arcpy.CreateFeatureclass_management(os.path.dirname(tmpFC),
                                            os.path.basename(tmpFC), shpType)
        lyrTmp = "lyrTmp"
        arcpy.MakeFeatureLayer_management(tmpFC, lyrTmp)

        # set up id field (used to join later)
        TFID = "XXXX_FID"
        arcpy.AddField_management(lyrTmp, TFID, "LONG")
        arcpy.DeleteField_management(lyrTmp, "ID")

        # rotate the feature class coordinates
        # only points, polylines, and polygons are supported

        # open read and write cursors
        Rows = arcpy.SearchCursor(lyrFC, "", "", "%s;%s" % (shpField, FID))
        oRows = arcpy.InsertCursor(lyrTmp)
        arcpy.AddMessage("Opened search cursor")
        if shpType == "Point":
            for Row in Rows:
                shp = Row.getValue(shpField)
                pnt = shp.getPart()
                pnt.X, pnt.Y = RotateXY(pnt.X, pnt.Y, xcen, ycen, angle)
                oRow = oRows.newRow()
                oRow.setValue(shpField, pnt)
                oRow.setValue(TFID, Row.getValue(FID))
                oRows.insertRow(oRow)
        elif shpType in ["Polyline", "Polygon"]:
            parts = arcpy.Array()
            rings = arcpy.Array()
            ring = arcpy.Array()
            for Row in Rows:
                shp = Row.getValue(shpField)
                p = 0
                for part in shp:
                    for pnt in part:
                        if pnt:
                            x, y = RotateXY(pnt.X, pnt.Y, xcen, ycen, angle)
                            ring.add(arcpy.Point(x, y, pnt.ID))
                        else:
                            # if we have a ring, save it
                            if len(ring) > 0:
                                rings.add(ring)
                                ring.removeAll()
                    # we have our last ring, add it
                    rings.add(ring)
                    ring.removeAll()
                    # if only one, remove nesting
                    if len(rings) == 1: rings = rings.getObject(0)
                    parts.add(rings)
                    rings.removeAll()
                    p += 1

                # if only one, remove nesting
                if len(parts) == 1: parts = parts.getObject(0)
                if dFC.shapeType == "Polyline":
                    shp = arcpy.Polyline(parts)
                else:
                    shp = arcpy.Polygon(parts)
                parts.removeAll()
                oRow = oRows.newRow()
                oRow.setValue(shpField, shp)
                oRow.setValue(TFID, Row.getValue(FID))
                oRows.insertRow(oRow)
        else:
            raise Exception("Shape type {0} is not supported".format(shpType))

        del oRow, oRows  # close write cursor (ensure buffer written)
        oRow, oRows = None, None  # restore variables for cleanup

        # join attributes, and copy to output
        arcpy.AddJoin_management(lyrTmp, TFID, lyrFC, FID)
        env.qualifiedFieldNames = False
        arcpy.Merge_management(lyrTmp, outputFC)
        lyrOut = "lyrOut"
        arcpy.MakeFeatureLayer_management(outputFC, lyrOut)
        # drop temp fields 2,3 (TFID, FID)
        fnames = [f.name for f in arcpy.ListFields(lyrOut)]
        dropList = ";".join(fnames[2:4])
        arcpy.DeleteField_management(lyrOut, dropList)

    except MsgError as xmsg:
        arcpy.AddError(str(xmsg))
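A sketch of a call, rotating a layer 45 degrees about the default pivot (the lower-left corner of its extent); the paths are hypothetical:

# Hypothetical usage.
RotateFeatureClass(r"C:\data\parcels.shp", r"C:\data\parcels_rot45.shp", angle=45)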
Code Example #14
    def RemoveAndAddFeatures(self,
                             url,
                             pathToFeatureClass,
                             id_field,
                             chunksize=1000):
        """Deletes all features in a feature service and uploads features from a feature class on disk.

        Args:
            url (str): The URL of the feature service.
            pathToFeatureClass (str): The path of the feature class on disk.
            id_field (str): The name of the field in the feature class to use for chunking.
            chunksize (int): The maximum amount of features to upload at a time. Defaults to 1000.
        Raises:
            ArcRestHelperError: if ``arcpy`` can't be found.

        """
        fl = None

        try:
            if arcpyFound == False:
                raise common.ArcRestHelperError({
                    "function":
                    "RemoveAndAddFeatures",
                    "line":
                    inspect.currentframe().f_back.f_lineno,
                    "filename":
                    'featureservicetools',
                    "synerror":
                    "ArcPy required for this function"
                })
            arcpy.env.overwriteOutput = True
            tempaddlayer = 'ewtdwedfew'
            if not arcpy.Exists(pathToFeatureClass):
                raise common.ArcRestHelperError({
                    "function":
                    "RemoveAndAddFeatures",
                    "line":
                    inspect.currentframe().f_back.f_lineno,
                    "filename":
                    'featureservicetools',
                    "synerror":
                    "%s does not exist" % pathToFeatureClass
                })

            fields = arcpy.ListFields(pathToFeatureClass, wild_card=id_field)
            if len(fields) == 0:
                raise common.ArcRestHelperError({
                    "function":
                    "RemoveAndAddFeatures",
                    "line":
                    inspect.currentframe().f_back.f_lineno,
                    "filename":
                    'featureservicetools',
                    "synerror":
                    "%s field does not exist" % id_field
                })
            strFld = fields[0].type == 'String'  # controls quoting of id values

            fl = FeatureLayer(url=url, securityHandler=self._securityHandler)

            id_field_local = arcpy.AddFieldDelimiters(pathToFeatureClass,
                                                      id_field)
            idlist = []
            print(
                arcpy.GetCount_management(
                    in_rows=pathToFeatureClass).getOutput(0) +
                " features in the layer")
            with arcpy.da.SearchCursor(pathToFeatureClass,
                                       (id_field)) as cursor:
                allidlist = []

                for row in cursor:
                    if strFld:
                        idlist.append("'" + row[0] + "'")
                    else:
                        idlist.append(row[0])
                    if len(idlist) >= chunksize:
                        allidlist.append(idlist)
                        idlist = []

                if len(idlist) > 0:
                    allidlist.append(idlist)
                for idlist in allidlist:
                    idstring = ' in (' + ','.join(idlist) + ')'
                    sql = id_field + idstring
                    sqlLocalFC = id_field_local + idstring
                    results = fl.deleteFeatures(where=sql,
                                                rollbackOnFailure=True)

                    if 'error' in results:
                        raise common.ArcRestHelperError({
                            "function": "RemoveAndAddFeatures",
                            "line": inspect.currentframe().f_back.f_lineno,
                            "filename": 'featureservicetools',
                            "synerror": results['error']
                        })
                    elif 'deleteResults' in results:
                        print("%s features deleted" %
                              len(results['deleteResults']))
                        for itm in results['deleteResults']:
                            if not itm['success']:
                                print(itm)
                    else:
                        print(results)

                    arcpy.MakeFeatureLayer_management(pathToFeatureClass,
                                                      tempaddlayer, sqlLocalFC)
                    results = fl.addFeatures(fc=tempaddlayer)

                    if 'error' in results:
                        raise common.ArcRestHelperError({
                            "function": "RemoveAndAddFeatures",
                            "line": inspect.currentframe().f_back.f_lineno,
                            "filename": 'featureservicetools',
                            "synerror": results['error']
                        })
                    elif 'addResults' in results:
                        print("%s features added" % len(results['addResults']))
                        for itm in results['addResults']:
                            if not itm['success']:
                                print(itm)
                    else:
                        print(results)
                    idlist = []
            if 'error' in results:
                raise common.ArcRestHelperError({
                    "function": "RemoveAndAddFeatures",
                    "line": inspect.currentframe().f_back.f_lineno,
                    "filename": 'featureservicetools',
                    "synerror": results['error']
                })
            else:
                print(results)
        except arcpy.ExecuteError:
            line, filename, synerror = trace()
            raise common.ArcRestHelperError({
                "function": "create_report_layers_using_config",
                "line": line,
                "filename": filename,
                "synerror": synerror,
                "arcpyError": arcpy.GetMessages(2),
            })
        except:
            line, filename, synerror = trace()
            raise common.ArcRestHelperError({
                "function": "AddFeaturesToFeatureLayer",
                "line": line,
                "filename": filename,
                "synerror": synerror,
            })
        finally:
            gc.collect()
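A minimal usage sketch; the instance name, service URL, path, and ID field below are placeholders, not part of the excerpt:

# Hedged usage sketch: "fst" stands in for an instance of the enclosing
# featureservicetools helper class, which is not shown here.
url = "https://services.example.com/arcgis/rest/services/Parcels/FeatureServer/0"
fc = r"C:\data\parcels.gdb\Parcels"
fst.RemoveAndAddFeatures(url=url,
                         pathToFeatureClass=fc,
                         id_field="PARCEL_ID",
                         chunksize=500)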
Code example #15
import os
import arcpy

# assumes arcpy.env.workspace has already been set to the shapefile folder
for shp in arcpy.ListFeatureClasses():
    # check whether the shapefile holds more than one feature
    if sum(1 for row in arcpy.da.SearchCursor(shp, "*")) > 1:
        # prep for creating new shp
        new_shp = os.path.join(os.path.dirname(shp), os.path.basename(shp).split(".")[0]+"_2.shp")
        # if so, run dissolve tool
        arcpy.Dissolve_management(shp, new_shp)
        # add route num field
        arcpy.AddField_management(new_shp, "ROUTE_NM", "TEXT", field_length=50)
        # populate w/ shapefile name
        with arcpy.da.UpdateCursor(new_shp, "ROUTE_NM") as cursor:
            for row in cursor:
                row[0] = os.path.basename(shp).split(".")[0]
                cursor.updateRow(row)
        continue
    if "ROUTE_NM" in [field.name for field in arcpy.ListFields(shp)]:
        continue
    else:
        arcpy.AddField_management(shp, "ROUTE_NM", "TEXT", field_length=50)
        with arcpy.da.UpdateCursor(shp, "ROUTE_NM") as cursor:
            for row in cursor:
                row[0] = os.path.basename(shp).split(".")[0]
                cursor.updateRow(row)

# finally, merge all shps into one, and preserve route num field (field mapping)
field_map = arcpy.FieldMappings()

merge_shps = arcpy.ListFeatureClasses("*_2*")
for shp in merge_shps:
    field_map.addTable(shp)
arcpy.Merge_management(merge_shps, os.path.join(arcpy.env.workspace, "SystemRoutes"),
                       field_map)
Code example #16
def esri_field_exists(in_tbl, field_name):
    return field_name in [f.name for f in arcpy.ListFields(in_tbl)]
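A short usage sketch (the feature class path and field name are hypothetical):

# Hedged usage sketch; add the field only when it is missing.
fc = r"C:\data\roads.gdb\Roads"
if not esri_field_exists(fc, "ROUTE_NM"):
    arcpy.AddField_management(fc, "ROUTE_NM", "TEXT", field_length=50)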
Code example #17
    def detruireDomaineAttribut(self, workspace):
    #-------------------------------------------------------------------------------------
        """
        Deletes all existing domains in the geodatabase.

        Parameters:
        -----------
        workspace   : Name of the geodatabase whose domains will be deleted.

        """

        # Send a message
        arcpy.AddMessage(" ")
        arcpy.AddMessage("- Removing all existing domains from the classes of the geodatabase")

        # Set the default workspace
        arcpy.env.workspace = workspace

        # Walk the feature classes of the geodatabase
        #for fc in classe.split(","):
        for fc in arcpy.ListFeatureClasses():
            # Get the fields
            fields = arcpy.ListFields(fc)

            # Process all the fields
            for field in fields:
                # Check whether a domain is assigned
                if len(field.domain) > 0:
                    # Check whether the attribute is present in the attribute list
                    #if field.name in attribut:

                    # Show the message
                    arcpy.AddMessage(" RemoveDomainFromField_management('" + fc + "', '" + field.name + "')")

                    # Remove the domain from the field of the class
                    arcpy.RemoveDomainFromField_management(fc, field.name)

        # Send a message
        arcpy.AddMessage(" ")
        arcpy.AddMessage("- Deleting all existing domains in the geodatabase")

        # Get the description of the geodatabase
        desc = arcpy.Describe(workspace)

        # Get all existing domains of the geodatabase
        domains = desc.domains

        # Process all the domains
        for domain in domains:
            # Check whether it is a domain of interest
            #if "DOM_" in domain:

            # Show the message
            arcpy.AddMessage(" DeleteDomain_management('" + workspace + "', '" + domain + "')")

            try:
                # Delete the domain
                arcpy.DeleteDomain_management(workspace, domain)
            # Error handling
            except Exception as err:
                # Show the error
                arcpy.AddWarning(str(err))
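A one-line usage sketch; the instance name and geodatabase path are assumptions:

# Hedged usage sketch: "tool" stands in for an instance of the enclosing class.
tool.detruireDomaineAttribut(r"C:\data\production.gdb")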
Code example #18
arcpy.CalculateField_management(
    in_table=ri_layer_name,
    field="SIDEWALK_EITHER_MILES",
    expression="[CENTERLINE_MILES] * [SIDEWALK_EITHER]",
    expression_type="VB",
    code_block="#")
arcpy.AddMessage("Completed step 17.")

# STEP 18: summary statistics: sum of CENTERLINE_MILES, sum of SIDEWALK_EITHER_MILES;
#                              case fields: TOWN_ID, TOWN
arcpy.Statistics_analysis(
    in_table=ri_name,
    out_table=output_table_name,
    statistics_fields="CENTERLINE_MILES SUM;SIDEWALK_EITHER_MILES SUM",
    case_field="TOWN_ID;TOWN")
arcpy.AddMessage("Completed step 18.")

# STEP 19: Export output file GDB table to CSV file.
# Source: http://gis.stackexchange.com/questions/109008/python-script-to-export-csv-tables-from-gdb
fields = arcpy.ListFields(output_table_name)
field_names = [field.name for field in fields]

with open(output_csv_file, 'wb') as f:
    w = csv.writer(f)
    w.writerow(field_names)
    for row in arcpy.SearchCursor(output_table_name):
        field_vals = [row.getValue(field.name) for field in fields]
        w.writerow(field_vals)
    del row
arcpy.AddMessage("Completed step 19.")
arcpy.AddMessage("Tool completed execution.")
Code example #19
def to_table(geo, location, overwrite=True):
    """
    Exports a geo enabled dataframe to a table.

    ===========================     ====================================================================
    **Argument**                    **Description**
    ---------------------------     --------------------------------------------------------------------
    location                        Required string. The output path of the table.
    ---------------------------     --------------------------------------------------------------------
    overwrite                       Optional Boolean.  If True (the default) and the table exists, it
                                    will be deleted and overwritten.  If False and the table exists, an
                                    exception will be raised.
    ===========================     ====================================================================

    :returns: String
    """
    out_location = os.path.dirname(location)
    fc_name = os.path.basename(location)
    df = geo._data
    if location.lower().find('.csv') > -1:
        geo._df.to_csv(location)
        return location
    elif HASARCPY:
        columns = df.columns.tolist()
        join_dummy = "AEIOUYAJC81Z"
        try:
            columns.pop(columns.index(df.spatial.name))
        except:
            pass
        dtypes = [(join_dummy, np.int64)]
        if overwrite and arcpy.Exists(location):
            arcpy.Delete_management(location)
        elif overwrite == False and arcpy.Exists(location):
            raise ValueError(('overwrite set to False, Cannot '
                              'overwrite the table. '))
        fc = arcpy.CreateTable_management(out_path=out_location,
                                          out_name=fc_name)[0]
        # 2. Add the Fields and Data Types
        #
        oidfld = da.Describe(fc)['OIDFieldName']
        for col in columns[:]:
            if col.lower() in ['fid', 'oid', 'objectid']:
                dtypes.append((col, np.int32))
            elif df[col].dtype.name == 'datetime64[ns]':
                dtypes.append((col, '<M8[us]'))
            elif df[col].dtype.name == 'object':
                try:
                    u = type(df[col][df[col].first_valid_index()])
                except:
                    u = pd.unique(df[col].apply(type)).tolist()[0]
                if issubclass(u, str):
                    mlen = df[col].str.len().max()
                    dtypes.append((col, '<U%s' % int(mlen)))
                else:
                    try:
                        dtypes.append((col, type(df[col][0])))
                    except:
                        dtypes.append((col, '<U254'))
            elif df[col].dtype.name == 'int64':
                dtypes.append((col, np.int64))
            elif df[col].dtype.name == 'bool':
                dtypes.append((col, np.int32))
            else:
                dtypes.append((col, df[col].dtype.type))

        array = np.array([],
                        np.dtype(dtypes))
        arcpy.da.ExtendTable(fc,
                             oidfld, array,
                             join_dummy, append_only=False)
        # 3. Insert the Data
        #
        fields = arcpy.ListFields(fc)
        icols = [fld.name for fld in fields \
                 if fld.type not in ['OID', 'Geometry'] and \
                 fld.name in df.columns]
        dfcols = [fld.name for fld in fields \
                  if fld.type not in ['OID', 'Geometry'] and\
                  fld.name in df.columns]
        with da.InsertCursor(fc, icols) as irows:
            for idx, row in df[dfcols].iterrows():
                try:
                    irows.insertRow(row.tolist())
                except:
                    print("row %s could not be inserted." % idx)
        return fc

    return
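A hedged usage sketch; it assumes a spatially enabled pandas DataFrame from the ArcGIS API for Python, and the output path is a placeholder:

# Sketch: "sdf" is assumed to be a spatially enabled DataFrame, e.g. loaded
# via GeoAccessor.from_featureclass; to_table receives its .spatial accessor.
result = to_table(sdf.spatial, r"C:\data\scratch.gdb\summary_table")
print(result)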
Code example #20
File: fc_to_pd_df.py Project: gtdang/arcpy_functions
def fc_to_pd_df(feature_class,
                field_list=None,
                skip_nulls=False,
                null_value=-999):
    '''
    This function converts a feature class to a pandas dataframe. The default returns all fields but you may supply a
    list of fields to extract. Fields with the "Geometry" datatype like the "Shape" field of most FC are removed because
    they are not 1-dimensional and Pandas can't deal with that data type.

    Note that very large feature classes may not work due to memory limitations, especially under 32-bit Python, which
    applies to ArcMap users. You may try supplying a list of only the fields you require to get past the memory limitations.
    ArcGIS Pro ships with 64-bit Python 3; this script has not been tested with that version.
    
    Written: 7/17/2019 GD

    :param feature_class: Input ArcGIS Feature Class
    :param field_list: Fields for input (optional), default is all fields
    :param skip_nulls: Passed through to arcpy.da.FeatureClassToNumPyArray (default False)
    :param null_value: Replacement value for nulls (default -999)
    :return: Pandas dataframe object
    '''
    # Generate a list of fields to import.
    field_list_temp = []
    all_fields = []
    fields = arcpy.ListFields(feature_class)
    for field in fields:
        # If a list of fields is not supplied import all fields, check for and exclude geometry data types
        if field_list is None:
            if field.type != 'Geometry':
                field_list_temp.append(field.name)
            else:
                print(
                    "Field \"{0}\" is of data type \"{1}\" and will not be imported into the pandas dataframe."
                    .format(field.name, field.type))
        # If a list is supplied we will check if any of the requested fields are of geometry data type, remove, and warn user
        else:
            all_fields.append(
                field.name
            )  # Append fields to a list that will be used to check if user requested fields exist in the feature class
            if (field.type != 'Geometry') & (field.name in field_list):
                field_list_temp.append(field.name)
            elif (field.type == 'Geometry') & (field.name in field_list):
                print(
                    "Field \"{0}\" is of data type \"{1}\" and will not be imported into the pandas dataframe."
                    .format(field.name, field.type))

    # If field_list is set, check if requested fields exist in the FC
    if field_list is not None:
        for field in field_list:
            if field not in all_fields:
                raise ValueError(
                    "Requested field \"{0}\" was not found in the feature class!"
                    .format(field))

    # Set field list to the list of verified field names to extract
    field_list = field_list_temp

    # Convert FC to numpy array with field list
    np_array = arcpy.da.FeatureClassToNumPyArray(in_table=feature_class,
                                                 field_names=field_list,
                                                 skip_nulls=skip_nulls,
                                                 null_value=null_value)
    return pd.DataFrame(np_array)
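A hedged usage sketch; the path and field names are placeholders:

# Sketch: pull two attribute fields into a pandas DataFrame.
df = fc_to_pd_df(r"C:\data\roads.gdb\Roads", field_list=["ROUTE_NM", "Miles"])
print(df.head())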
Code example #21
File: common.py Project: simberaj/densarea
def fieldList(layer, type=None):
    '''Returns a list of field names of the specified layer attributes.'''
    if type is None:
        return [field.name for field in arcpy.ListFields(layer)]
    else:
        return [field.name for field in arcpy.ListFields(layer, '', type)]
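A hedged usage sketch; the layer path is a placeholder:

# Sketch: list every field name, then only the text fields.
all_names = fieldList(r"C:\data\roads.gdb\Roads")
text_names = fieldList(r"C:\data\roads.gdb\Roads", type='String')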
Code example #22
def create_other_point(featuredata):
    cur = None
    try:
        cur = arcpy.InsertCursor(featuredata)
        fldname = [fldn.name for fldn in arcpy.ListFields(featuredata)]
        for i in range(1, nrows):

            # look up the coordinate row once by point number
            pointnumber = table.cell(i, getColumnIndex(table, "POINTNUMBER")).value
            coor_row = getRowIndex(table_coor, pointnumber, "POINTNUMBER")
            L1 = table_coor.cell(coor_row, getColumnIndex(table_coor, "X")).value
            B1 = table_coor.cell(coor_row, getColumnIndex(table_coor, "Y")).value
            H1 = table_coor.cell(coor_row, getColumnIndex(table_coor, "Z")).value
            row = cur.newRow()
            if L1:
                point = arcpy.Point(
                    round(
                        float(L1), 8), round(
                        float(B1), 8), float(H1))
                row.shape = point
            for fldn in fldname:
                for j in range(0, ncols):
                    #print 112
                    if fldn == (str(table.cell(0, j).value).strip()).upper():
                        # print table.cell(i, j).ctype
                        if table.cell(i, j).ctype == 3:
                            # print table.cell(i, j).ctype
                            date = xlrd.xldate_as_tuple(
                                table.cell(i, j).value, 0)
                            # print(date)
                            tt = datetime.datetime.strftime(
                                datetime.datetime(*date), "%Y-%m-%d")
                            try:
                                row.setValue(fldn, tt)
                            except Exception as e:
                                print e

                        else:
                            try:
                                row.setValue(fldn, table.cell(i, j).value)
                            except Exception as e:
                                print e

            cur.insertRow(row)

    except Exception as e:
        print e
        print "\t 请检查表:%s 是否表头没有删除中文字段及注释部分" % filename
        arcpy.AddMessage(e)
    else:
        print "导入数据表{0}  {1}条数据".format(featuredata, nrows-1)

    finally:
        if cur:

            del cur
Code example #23
arcpy.FeatureClassToGeodatabase_conversion([
    'Stillaguamish_Subwatershed', 'Stillaguamish_Catchment', 'FishStream',
    'ForestRoad'
], out_gdb_path)
print("FC to geodatabase conversion completed")
##----------------Alter miles field name for ForestRoad and FishStream------------------------------
## Change workspace file path as required.
arcpy.env.workspace = "B:/GIS_Projects/421/Lab_4/Data/Still_Outputs.gdb"
arcpy.env.parallelProcessingFactor = "100%"
## These are the target feature classes. Change them as necessary to suit your analysis.
stream_road = ['FishStream', 'ForestRoad']
fclist = arcpy.ListFeatureClasses()
print(fclist)
for fc in fclist:
    if fc == stream_road[0]:
        fieldlist = arcpy.ListFields(fc)
        print("%s fields:\n" % (fc))
        for field in fieldlist:
            print(field.name)
            ## Change "Miles" field to "Stream_Miles" to be more descriptive.
            if field.name == 'Miles':
                print("Changing %s to Stream_Miles" % (field.name))
                arcpy.AlterField_management(fc, field.name, 'Stream_Miles',
                                            'Fish Stream Miles')
                print("%s now contains Stream_Miles" % (fc))
    elif fc == stream_road[1]:
        fieldlist = arcpy.ListFields(fc)
        print("%s fields:\n" % (fc))
        for field in fieldlist:
Code example #24
### Preliminary processing
# load parameters
with open("code/parameters/general.toml") as conffile:
    general_params = toml.loads(conffile.read())
with open("code/parameters/omit-areas.toml") as conffile:
    omit_areas_params = toml.loads(conffile.read())

# set environmental variables
arcpy.env.parallelProcessingFactor = general_params['threads']
arcpy.env.overwriteOutput = True

### Main processing
## parse expression
# parse fields in shapefile
file_fields = [f.name for f in arcpy.ListFields(sys.argv[1])]
for i in range(len(file_fields)):
    file_fields[i] = file_fields[i].encode('utf8')

# loop over fields in toml file
expression = ""
if 'omit character data' in omit_areas_params.keys():
    omit_fields = omit_areas_params['omit character data']
    for omit_field in omit_fields.keys():
        # parse field
        if omit_field in file_fields:
            if omit_fields[omit_field].__class__.__name__ == 'list':
                if len(omit_fields[omit_field]) > 1:
                    expression += '"' + omit_field + '" NOT IN ('
                    for value in omit_fields[omit_field]:
                        expression += "'" + value + "'" + ','
Code example #25
    def execute(self, pParams):
        """ 
        pParams=(gdbSource, gdbTarget, gdbBackup, lTables)
        """
        sMsg = ""
        sOK = apwrutils.C_OK
        ds = time.clock()
        try:
            (gdbSource, gdbTarget, gdbBackup, lTables) = pParams
            pGDBSource = GDBOp(gdbSource)
            pGDBBackup = GDBOp(gdbBackup)
            pGDBTarget = GDBOp(gdbTarget)

            if ((self.DebugLevel & 1) == 1):
                sMsg = apwrutils.Utils.getcmdargs(pParams)
                arcpy.AddMessage(sMsg)
            #..make sure the target gdb has the tables, if not copy them.
            for i, sTable in enumerate(lTables):
                sTableNameT = pGDBTarget.getSDETableName(sTable)
                tbTarget = os.path.join(gdbTarget, sTableNameT)
                if not arcpy.Exists(tbTarget):
                    sTableNameS = pGDBSource.getSDETableName(sTable)
                    tbSource = os.path.join(gdbSource, sTableNameS)
                    arcpy.Copy_management(tbSource,
                                          os.path.join(gdbTarget, sTable))
                    if (self.DebugLevel & 1) == 1:
                        arcpy.AddMessage("{}. Copy {} -> {}".format(
                            i, tbSource, tbTarget))

            #..Copy the tables from target to the backup gdb
            hd = "X_{}".format(apwrutils.Utils.GetDateTimeString())
            for i, sTable in enumerate(lTables):
                tbSource = os.path.join(gdbTarget,
                                        pGDBTarget.getSDETableName(sTable))
                tbTarget = os.path.join(gdbBackup, "{}_{}".format(hd, sTable))
                arcpy.Copy_management(tbSource, tbTarget)
                if (self.DebugLevel & 1) == 1:
                    arcpy.AddMessage("{}. Copy {} -> {}".format(
                        i, tbSource, tbTarget))

            for i, sTable in enumerate(lTables):
                sTableS = pGDBSource.getSDETableName(sTable)
                sTableT = pGDBTarget.getSDETableName(sTable)
                tbTarget = os.path.join(gdbTarget, sTableT)
                tbSource = os.path.join(gdbSource, sTableS)
                nCnt = int(arcpy.GetCount_management(tbSource)[0])
                arcpy.DeleteRows_management(tbTarget)
                arcpy.Append_management(tbSource, tbTarget, "NO_TEST")
                if (tbTarget.endswith("Max")):
                    #..trying to copy the field of Max_TS...
                    if (len(arcpy.ListFields(tbTarget, FN_MaxTSTimeDT)) > 0):
                        try:
                            arcpy.CalculateField_management(
                                tbTarget, FN_MaxTSTimeDT,
                                "!{}!".format(flooddsconfig.FN_ForecastTime),
                                "PYTHON_9.3")
                        except:
                            pass
                    if (len(arcpy.ListFields(tbTarget, FN_MaxTSTime)) > 0):
                        try:
                            arcpy.CalculateField_management(
                                tbTarget, FN_MaxTSTime,
                                "!{}!".format(flooddsconfig.FN_TSTIME),
                                "PYTHON_9.3")
                        except:
                            pass

                if (self.DebugLevel & 1) == 1:
                    arcpy.AddMessage("{}. Copy {} recs, {} -> {}".format(
                        i, nCnt, tbSource, tbTarget))

        except:
            sMsg = trace()
            arcpy.AddMessage(sMsg)
            sOK = apwrutils.C_NOTOK
        finally:
            pass
        return (sOK, gdbBackup, sMsg)
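A hedged invocation sketch; the instance name, paths, and table list are assumptions:

# Sketch: "tool" stands in for an instance of the enclosing class.
pParams = (r"C:\data\source.sde", r"C:\data\target.sde",
           r"C:\data\backup.gdb", ["Results", "ResultsMax"])
sOK, gdbBackup, sMsg = tool.execute(pParams)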
Code example #26
def unify(directory):

    print "Unifying lakes from " + directory + "..."

    # Get licensed
    if arcpy.CheckExtension("Spatial") == "Available":
        arcpy.CheckOutExtension("Spatial")
    else:
        print "No SA licence"
        sys.exit()  # assumes sys is imported with the rest of the elided imports

    # Load the environment
    env.workspace = "C:/Users/hengstam/Desktop/Research/hengst_env"
    hashlength = 30
    disphashlength = 12

    # This is where intersection results will be temporarily held
    output = "/temp/intersection_output.shp"
    if arcpy.Exists(output):
        arcpy.Delete_management(output)

    # This is where we trace lakes over for copying them
    tracingpaper = "/temp/tracing_output.shp"
    if arcpy.Exists(tracingpaper):
        arcpy.Delete_management(tracingpaper)

    # Get some names
    masterlakefile = "/master_lakes/master_lake_file.shp"

    ###########################################
    ## Define some new types of data structures
    class bubblebath(object):

        def __init__(self):
            # use an instance attribute; a class-level "data = []" would be
            # shared across all bubblebath instances
            self.data = []

        def add(self, i):

            # Add our incoming group to the dataset
            self.data.append(set(i))

            # Iterate through new things
            for item in i:
                membership = []
                index = -1

                # Work through each group
                for bubble in self.data:
                    index += 1

                    # Work through each group member
                    for thing in bubble:

                        # If one of our new items matches a group member, remember that group.
                        if item == thing:
                            membership.append(index)

                            # We only need one match per group
                            break

                # Now we have a list of things we belong to. We may need to merge those.
                if len(membership) > 1:

                    newbubble = set()

                    # Merge them all
                    for member in membership:
                        newbubble = newbubble | self.data[member]

                    # Flip and reverse it so we don't change our indices while deleting
                    membership.reverse()

                    # Delete the old ones
                    for member in membership:
                        del self.data[member]

                    # Add the new one
                    self.data.append(newbubble)

                # And now we repeat for the rest of the items

        def read(self):

            # This is what we will eventually spit out
            out = []

            for i in self.data:
                out.append(list(i))

            return out

        def clean(self, size):

            # Initialize the index and a list of things to get rid of
            index = -1
            remove = []

            # Iterate
            for bubble in self.data:
                index += 1

                # Check if it's too small
                if len(bubble) <= size or size == 0:
                    remove.append(index)

            # Now flip the remove list
            remove.reverse()

            # And delete them all
            for i in remove:
                del self.data[i]

    # Make a clean reader
    def reader(text):
        t = str(text)
        return t[20:22]

    def iterreader(arr):
        s = "["
        for a in arr:
            s += reader(a)
            s += ', '
        s = s[:-2]
        s += ']'
        return s

    def twoiterreader(arr):
        s = "["
        for ar in arr:
            s += '['
            for a in ar:
                s += reader(a)
                s += ', '
            s = s[:-2]
            s += '], '
        s = s[:-3]
        s += ']'
        return s

    ####################
    ## BEGIN BEGIN BEGIN

    # Loop through all folder names
    fns = glob.glob(env.workspace + '/' + directory + '/*.shp')
    for fn in fns:

        # Trim the filename to get the directory component
        newlakes = fn[len(env.workspace):]

        print "Loading from " + newlakes + "..."

        ################################################################################
        ## Build a database to help us get feature class filenames from the master lakes
        print "Generating dictionary..."

        # Make the dictionary
        refDict = {}

        # Build a cursor so we can get info on the master lakes
        tempcursor = arcpy.da.SearchCursor(masterlakefile, ['FID', 'ref_id'])

        # Iterate through the cursor results and fill the dictionary
        for fid, hashname in tempcursor:
            refDict[fid] = hashname[:hashlength]

        # Delete the cursor
        del tempcursor
        print "Dictionary generated."

        ######################################
        ## Collect all FIDs and hashes from the new lakes
        newlakeFIDs = {}
        newRefDict = {}

        # Build a cursor so we can get the stuff from the new lakes
        tempcursor = arcpy.da.SearchCursor(newlakes, ['FID', 'lake_id'])

        for temprow in tempcursor:
            # Mark them all good for now
            newlakeFIDs[temprow[0]] = True

            # Load this up
            newRefDict[temprow[0]] = temprow[1]

        del tempcursor

        #################################
        ## Prepare to resolve lake merges
        merges = {}

        ###############################################
        ## Make lists of lakes which are being modified
        lakes_to_add = set()
        lakes_to_remove = set()

        ##########################
        ## Check for intersections
        print "Checking for intersections..."

        # Make a list of assignments
        assignments = {}

        # Run the master intersection
        arcpy.Intersect_analysis((newlakes, masterlakefile), output,
                                 'ONLY_FID')

        # Get the names of the two FID fields for the output
        fields = arcpy.ListFields(output)
        FID1 = fields[2].baseName
        FID2 = fields[3].baseName

        # Build a cursor which will iterate over the output fields
        cursor = arcpy.da.SearchCursor(output, [FID1, FID2])

        # Build feature layers on the new lake feature classe to enable selection of objects
        arcpy.Delete_management("newlakes_lyr")
        arcpy.MakeFeatureLayer_management(newlakes, "newlakes_lyr")

        # Iterate through the new intersection shapes
        print "Matching new lakes..."
        for row in cursor:

            # Get the saved input FIDs of each intersection
            newlakeFID = row[0]
            masterlake = row[1]

            # Lookup the reference in our handy-dandy dictionary
            lakeRef = '/master_lakes/lakes/' + refDict[masterlake] + '.shp'

            # This gets either the previous assignments or an empty list and then adds the current assignment to it
            if str(newlakeFID) in assignments:
                assignments[str(newlakeFID)].append(lakeRef)
            else:
                assignments[str(newlakeFID)] = [lakeRef]

            # Prepare to check for duplicates
            eject = False
            tempcursor = arcpy.da.SearchCursor(lakeRef, ['lake_id'])

            # Look through the already-saved lakes
            newRef = newRefDict[newlakeFID]
            for temprow in tempcursor:

                existingHash = temprow[0]

                # Check that we're not adding a duplicate
                if existingHash == newRef:
                    eject = True
                    break

            del tempcursor

            # Is it a duplicate?
            if eject:
                print 'Trying to add a duplicate lake ' + newRef[:disphashlength] + '. Ignoring.'

            # Nope
            else:

                # Prepare a partial feature class to copy it over (it's just going to be the one lake)
                arcpy.FeatureClassToFeatureClass_conversion(
                    newlakes, env.workspace, tracingpaper,
                    'FID = ' + str(row[0]))

                # Add this lake to the new feature class
                arcpy.Append_management(tracingpaper, lakeRef, "NO_TEST")

                # Delete the temporary copy
                arcpy.Delete_management(tracingpaper)

                # This lake needs to be refreshed
                lakes_to_remove.add(lakeRef)
                lakes_to_add.add(lakeRef)

                print 'Added lake ' + newRef[:disphashlength] + ' to ' + lakeRef + '.'

            # Indicate that this lake has found a home
            newlakeFIDs[newlakeFID] = False

        del cursor

        # Remove the temp file
        arcpy.Delete_management(output)

        print "Matching complete."

        #####################################
        ## Make new lakes for new lake shapes

        # Iterate through all the lakes...
        cursor = arcpy.da.SearchCursor(newlakes, ['FID', 'lake_id'])

        for row in cursor:

            # Check from the dictionary to make sure it's untouched
            if newlakeFIDs[row[0]]:

                # Yay!
                hashID = row[1]
                hashID = hashID[:hashlength]

                # Save it to a brand-new feature class
                myNewLakeFilename = '/master_lakes/lakes/' + hashID + '.shp'
                if arcpy.Exists(myNewLakeFilename):
                    print "Skipping making a new lake, file already present."
                else:

                    # Make said brand-new feature class
                    arcpy.CreateFeatureclass_management(
                        env.workspace, myNewLakeFilename, "POLYGON")
                    arcpy.AddField_management(myNewLakeFilename, "ID", "LONG")
                    arcpy.AddField_management(myNewLakeFilename, "GRIDCODE",
                                              "LONG")
                    arcpy.AddField_management(myNewLakeFilename, "area",
                                              "DOUBLE")
                    arcpy.AddField_management(myNewLakeFilename, "centr_x",
                                              "DOUBLE")
                    arcpy.AddField_management(myNewLakeFilename, "centr_y",
                                              "DOUBLE")
                    arcpy.AddField_management(myNewLakeFilename, "lake_id",
                                              "STRING")
                    arcpy.AddField_management(myNewLakeFilename, "date",
                                              "LONG")
                    arcpy.AddField_management(myNewLakeFilename, "loc1",
                                              "SHORT")
                    arcpy.AddField_management(myNewLakeFilename, "loc2",
                                              "SHORT")

                    # Prepare a partial feature class to copy it over (it's just going to be the one lake)
                    arcpy.FeatureClassToFeatureClass_conversion(
                        newlakes, env.workspace, tracingpaper,
                        'FID = ' + str(row[0]))

                    # Add this lake to the new feature class
                    arcpy.Append_management(tracingpaper, myNewLakeFilename,
                                            "NO_TEST")

                    # Delete the temporary copy
                    arcpy.Delete_management(tracingpaper)

                    # This needs to be added to the master file
                    lakes_to_add.add(myNewLakeFilename)

                    print "New lake found! Created a whole new file just for it, we'll call it " + hashID[:
                                                                                                          disphashlength] + '.'

        # Clean up
        del cursor

        ################################################
        ## Go through all matched lakes and find mergers

        print "Merge checking..."

        # Make our data structure
        bath = bubblebath()

        # Load them all in
        for assignment in assignments:
            print iterreader(assignments[assignment]) + ' --> ' + twoiterreader(bath.read())
            bath.add(assignments[assignment])

        # Clean the small things (aka a non-merger)
        print "Behold the final bubblebath:"

        bath.clean(1)
        print twoiterreader(bath.read())

        # Merge this stuff
        for bubble in bath.read():

            # Make a new feature class name
            m = hashlib.sha224()

            # Mutate hash using lake names
            for item in bubble:
                m.update(str(item))

            m.update('holla holla')

            # Export it
            hashvalue = m.hexdigest()
            myNewLakeFilename = '/master_lakes/lakes/' + hashvalue[:hashlength] + '.shp'

            del m

            if arcpy.Exists(myNewLakeFilename):
                print myNewLakeFilename
                print "Collision while trying to merge bubbles!!!"
            else:

                print "Bubbles will be merged into " + myNewLakeFilename + "."

                # Make said brand-new feature class
                arcpy.CreateFeatureclass_management(env.workspace,
                                                    myNewLakeFilename,
                                                    "POLYGON")
                arcpy.AddField_management(myNewLakeFilename, "ID", "LONG")
                arcpy.AddField_management(myNewLakeFilename, "GRIDCODE",
                                          "LONG")
                arcpy.AddField_management(myNewLakeFilename, "area", "DOUBLE")
                arcpy.AddField_management(myNewLakeFilename, "centr_x",
                                          "DOUBLE")
                arcpy.AddField_management(myNewLakeFilename, "centr_y",
                                          "DOUBLE")
                arcpy.AddField_management(myNewLakeFilename, "lake_id",
                                          "STRING")
                arcpy.AddField_management(myNewLakeFilename, "date", "LONG")
                arcpy.AddField_management(myNewLakeFilename, "loc1", "SHORT")
                arcpy.AddField_management(myNewLakeFilename, "loc2", "SHORT")

            for item in bubble:

                print "Merging " + item + "..."

                # Append all the other ones
                arcpy.Append_management(item, myNewLakeFilename, "NO_TEST")

                # Delete the old feature classes
                arcpy.Delete_management(item)

                # This needs to be removed
                lakes_to_remove.add(item)

            # Remove duplicate lakes from the unified feature class
            tempcursor = arcpy.da.UpdateCursor(myNewLakeFilename, ['lake_id'])

            # Make a list of lake IDs. When we find a duplicate we'll delete the dupe one
            IDs = set()

            for row in tempcursor:
                ID = row[0]
                if ID in IDs:
                    tempcursor.deleteRow()
                    print "Deleted a duplicate in the merged bubble."
                else:
                    IDs.add(ID)

            # Take out the trash
            del tempcursor, IDs

            # Make sure to add the new lake
            lakes_to_add.add(myNewLakeFilename)

            print "Merge successful."

            # Now do it for the others

        ####################################################
        ## Generate union shapes and update the master files
        print "Beginning master lake file update..."

        if len(lakes_to_add) == 0 and len(lakes_to_remove) == 0:
            print "actually nevermind..."
        else:

            # Make a new master lake file
            arcpy.Delete_management(masterlakefile)
            arcpy.CreateFeatureclass_management(env.workspace, masterlakefile,
                                                "POLYGON")
            arcpy.AddField_management(masterlakefile, "ref_id", "STRING")
            arcpy.AddField_management(masterlakefile, "n", "SHORT")
            arcpy.AddField_management(masterlakefile, "n_real", "SHORT")
            arcpy.AddField_management(masterlakefile, "n_ratio", "SHORT")

            print "Master lake file reset."

            # Build an array for the lakes
            lakearray = []

            # Open the shape folder directory
            os.chdir(env.workspace + "/master_lakes/lakes/")

            # Iterate through all shapefiles
            for file in glob.glob("*.shp"):

                ref_id = file[:-4]

                # Add to the array
                lakearray.append(file)

                # Count how many things the thing has
                number = arcpy.GetCount_management(file)
                dates = set()

                # Iterate through all elements of that lake
                count_cursor = arcpy.da.SearchCursor(file, ['date'])
                for crow in count_cursor:
                    dates.add(crow[0])

                # Make a union of the thing
                arcpy.Dissolve_management(file, output)

                # Get ready to add reference stuff to the thing
                arcpy.AddField_management(output, "ref_id", "STRING")
                arcpy.AddField_management(output, "n", "SHORT")
                arcpy.AddField_management(output, "n_real", "SHORT")
                arcpy.AddField_management(output, "n_ratio", "SHORT")

                # This cursor will let up change up that reference id
                cursor = arcpy.da.UpdateCursor(
                    output, ["ref_id", "n", "n_real", "n_ratio"])

                # Update that thang
                for row in cursor:
                    row[0] = ref_id
                    row[1] = int(number[0])
                    row[2] = len(dates)
                    row[3] = row[1] / row[2]
                    print "Adding lake", ref_id, "to new master lake file. Has", number[
                        0], "lake images over", row[2], "dates."
                    cursor.updateRow(row)

                del cursor

                # Add it to the master lake file
                arcpy.Append_management(output, masterlakefile, 'NO_TEST')

                # Remove the temp file
                arcpy.Delete_management(output)

            print "Master lake file updated."

        print "Success!"

        # Reset this thing
        bath.clean(0)
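A hedged usage sketch; the directory name is a placeholder resolved against the hard-coded workspace inside unify():

# Sketch: processes every shapefile under <workspace>/new_lakes/.
unify("new_lakes")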
Code example #27
File: helpers.py Project: sfei/rwsm
def fasterJoin(fc,
               fcField,
               joinFC,
               joinFCField,
               fields,
               fieldsNewNames=None,
               convertCodes=False):
    """Custom function for joining feature class data sets, originally written by Marshall
    
    Arguments:
        fc {feature class} -- feature class to be updated
        fcField {string} -- feature class field for identifying related records in joinFC
        joinFC {feature class} -- feature class to join
        joinFCField {string} -- feature class field for identifying related records in fc
        fields {list} -- list of fields to add to fc from joinFC
    
    Keyword Arguments:
        fieldsNewNames {list} -- list of new names for fields being added (default: {None})
        convertCodes {bool} -- flag for converting codes to float values, required for some 
            operations (default: {False})
    """

    # Create joinList, which is a list of [name, type] for input fields
    listfields = arcpy.ListFields(joinFC)
    joinList = [[k.name, k.type] for k in listfields if k.name in fields]

    if fieldsNewNames:
        # Replace original names with new names in joinList and append old ones to list
        for name, typ in joinList:
            i = fields.index(name)
            joinList[joinList.index([name, typ])][0] = fieldsNewNames[i]
    else:
        fieldsNewNames = fields

    # As Field object types and AddField types have different names (shrug),
    # map object types to AddField types
    for name, typ in joinList:
        i = joinList.index([name, typ])
        if typ == 'Integer':
            joinList[i] = [name, 'LONG']
        elif typ == 'SmallInteger':
            joinList[i] = [name, 'SHORT']
        elif typ == 'String':
            joinList[i] = [name, 'TEXT']
        elif typ == 'Single':
            joinList[i] = [name, 'FLOAT']
        elif typ == 'Double':
            joinList[i] = [name, 'DOUBLE']

    # Add fields with associated names
    for name, typ in joinList:
        arcpy.AddField_management(fc, name, typ)

    joinDict = {}
    for f in fields:
        joinDict[f] = {}

    sFields = (joinFCField,) + tuple(fields)
    with arcpy.da.SearchCursor(joinFC, sFields) as cursor:
        for row in cursor:
            for f in fields:
                if convertCodes:
                    joinDict[f][float(row[0])] = row[fields.index(f) + 1]
                else:
                    joinDict[f][row[0]] = row[fields.index(f) + 1]

    uFields = (fcField,) + tuple(fieldsNewNames)
    with arcpy.da.UpdateCursor(fc, uFields) as cursor:
        for row in cursor:
            for f in fields:
                row[fields.index(f) + 1] = joinDict[f].get(row[0], None)
            cursor.updateRow(row)
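A hedged usage sketch; the paths, key field, and joined field are placeholders:

# Sketch: copy LU_DESC from a lookup table onto parcels via the LU_CODE key,
# storing it under a new name.
fasterJoin(r"C:\data\parcels.gdb\Parcels", "LU_CODE",
           r"C:\data\parcels.gdb\LandUseLUT", "LU_CODE",
           ("LU_DESC",), fieldsNewNames=("LandUseDesc",))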
Code example #28
    ScriptUtils.AddMsgAndPrint("\tAdding and calculating new fields...")

    arcpy.AddField_management("ParcelDBF_Join", "PARCELID", "TEXT", "", "", 12)
    arcpy.AddField_management("ParcelDBF_Join", "LRSN", "DOUBLE", 10)
    arcpy.AddField_management("ParcelDBF_Join", "CONSIDERATION", "DOUBLE", 10)
    arcpy.AddField_management("ParcelDBF_Join", "PVA_NEIGHBOR", "LONG", 6)
    arcpy.AddField_management("ParcelDBF_Join", "PROP_CLASS", "SHORT", 3)
    arcpy.AddField_management("ParcelDBF_Join", "TRANSFER_DATE", "DATE")

    arcpy.CalculateField_management("ParcelDBF_Join", "PARCELID",
                                    "[ParcelCopy_PARCELID]")
    arcpy.CalculateField_management("ParcelDBF_Join", "LRSN",
                                    "[ParcelCopy_LRSN]")

    fields = arcpy.ListFields("ParcelDBF_Join")
    for f in fields:
        if "VSALES_Considerat" in f.name:
            arcpy.CalculateField_management("ParcelDBF_Join", "CONSIDERATION",
                                            "[{0}]".format(f.name))
        if "VSALES_Neighborho" in f.name:
            arcpy.CalculateField_management("ParcelDBF_Join", "PVA_NEIGHBOR",
                                            "[{0}]".format(f.name))
        if f.name == "VSALES_PC":
            arcpy.CalculateField_management("ParcelDBF_Join", "PROP_CLASS",
                                            "[{0}]".format(f.name))
        if "VSALES_Transfer_D" in f.name:
            arcpy.CalculateField_management("ParcelDBF_Join", "TRANSFER_DATE",
                                            "[{0}]".format(f.name))

    if not arcpy.Exists(outGDB):
Code example #29
File: canopy.py Project: ztpilgrim/canopy
def generate_gtpoints(phyreg_ids, min_area_sqkm, max_area_sqkm, min_points,
                      max_points):
    '''
    This function generates randomized points for ground truthing. It creates
    the GT field in the output shapefile.

    phyreg_ids:     list of physiographic region IDs to process
    min_area_sqkm:  minimum area in square kilometers
    max_area_sqkm:  maximum area in square kilometers
    min_points:     minimum number of points allowed
    max_points:     maximum number of points allowed
    '''
    # fix user errors, if any
    if min_area_sqkm > max_area_sqkm:
        tmp = min_area_sqkm
        min_area_sqkm = max_area_sqkm
        max_area_sqkm = tmp

    if min_points > max_points:
        tmp = min_points
        min_points = max_points
        max_points = tmp

    phyregs_layer = canopy_config.phyregs_layer
    phyregs_area_sqkm_field = canopy_config.phyregs_area_sqkm_field
    naipqq_layer = canopy_config.naipqq_layer
    spatref_wkid = canopy_config.spatref_wkid
    analysis_year = canopy_config.analysis_year
    results_path = canopy_config.results_path

    arcpy.env.overwriteOutput = True
    arcpy.env.addOutputsToMap = False
    arcpy.env.outputCoordinateSystem = arcpy.SpatialReference(spatref_wkid)

    # make sure to clear selection because most geoprocessing tools use
    # selected features, if any
    arcpy.SelectLayerByAttribute_management(naipqq_layer, 'CLEAR_SELECTION')

    # select phyregs features to process
    arcpy.SelectLayerByAttribute_management(phyregs_layer,
            where_clause='PHYSIO_ID in (%s)' % ','.join(map(str, phyreg_ids)))
    with arcpy.da.SearchCursor(phyregs_layer,
            ['NAME', 'PHYSIO_ID', phyregs_area_sqkm_field]) as cur:
        for row in cur:
            name = row[0]
            print(name)
            # CreateRandomPoints cannot create a shapefile with - in its
            # filename
            name = name.replace(' ', '_').replace('-', '_')
            phyreg_id = row[1]
            area_sqkm = row[2]

            # +1 to count partial points; e.g., 0.1 requires one point
            point_count = int(min_points + (max_points - min_points) /
                (max_area_sqkm - min_area_sqkm) * (area_sqkm - min_area_sqkm)
                 + 1)

            print('Raw point count: %d' % point_count)
            if point_count < min_points:
                point_count = min_points
            elif point_count > max_points:
                point_count = max_points
            print('Final point count: %d' % point_count)

            outdir_path = '%s/%s/Outputs' % (results_path, name)
            shp_filename = 'gtpoints_%d_%s.shp' % (analysis_year, name)

            tmp_shp_filename = 'tmp_%s' % shp_filename
            tmp_shp_path = '%s/%s' % (outdir_path, tmp_shp_filename)

            # create random points
            arcpy.SelectLayerByAttribute_management(phyregs_layer,
                    where_clause='PHYSIO_ID=%d' % phyreg_id)
            arcpy.CreateRandomPoints_management(outdir_path, tmp_shp_filename,
                    phyregs_layer, '', point_count)

            # create a new field to store data for ground truthing
            gt_field = 'GT'
            arcpy.AddField_management(tmp_shp_path, gt_field, 'SHORT')

            # spatially join the naip qq layer to random points to find output
            # tile filenames
            shp_path = '%s/%s' % (outdir_path, shp_filename)
            arcpy.analysis.SpatialJoin(tmp_shp_path, naipqq_layer, shp_path)

            # delete temporary point shapefile
            arcpy.Delete_management(tmp_shp_path)

            # get required fields from spatially joined point layer
            with arcpy.da.UpdateCursor(shp_path, ['SHAPE@XY', gt_field,
                'FileName']) as cur2:
                for row2 in cur2:
                    # read filename
                    filename = row2[2][:-13]
                    # construct the final output tile path
                    cfrtiffile_path = '%s/cfr%s.tif' % (outdir_path, filename)
                    # read the output tile as raster
                    ras = arcpy.sa.Raster(cfrtiffile_path)
                    # resolution
                    res = (ras.meanCellWidth, ras.meanCellHeight)
                    # convert raster to numpy array to read cell values
                    ras_a = arcpy.RasterToNumPyArray(ras)
                    # get xy values of point
                    xy = row2[0]
                    # perform calculate_row_column to get the row and column of
                    # the point
                    rc = calculate_row_column(xy, ras.extent, res)
                    # update the point
                    row2[1] = ras_a[rc]
                    cur2.updateRow(row2)

            # delete all fields except only those required
            shp_desc = arcpy.Describe(shp_path)
            oid_field = shp_desc.OIDFieldName
            shape_field = shp_desc.shapeFieldName

            all_fields = arcpy.ListFields(shp_path)
            required_fields = [oid_field, shape_field, gt_field]
            extra_fields = [x.name for x in all_fields
                    if x.name not in required_fields]
            arcpy.DeleteField_management(shp_path, extra_fields)

    # clear selection again
    arcpy.SelectLayerByAttribute_management(phyregs_layer, 'CLEAR_SELECTION')

    print('Completed')
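A hedged usage sketch; the region IDs and thresholds are illustrative only:

# Sketch: generate between 100 and 200 ground-truthing points per region.
generate_gtpoints([5, 8], min_area_sqkm=10, max_area_sqkm=1000,
                  min_points=100, max_points=200)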
Code example #30
    if usingInputSR or (inputSR and spatialReference and
                        spatialReference.factoryCode == inputSR.factoryCode):
        spatialReference = None

    saveRasters = (saveRasters.lower() == "true")
    if saveRasters:
        if not os.path.exists(rasterFolder):
            os.mkdir(rasterFolder)  # may throw an exception (that's ok)
        else:
            if not os.path.isdir(rasterFolder):
                utils.die(rasterFolder + " is not a folder. Quitting.")

    uniqueValues = None
    if subsetIdentifier in [
            field.name for field in arcpy.ListFields(locationLayer)
    ]:
        uniqueValues = UD_Isopleths.GetUniqueValues(locationLayer,
                                                    subsetIdentifier)
    if not uniqueValues:
        utils.die("Could not generate a list of unique values for " +
                  subsetIdentifier + ". Quitting.")

    #
    # Calculate smoothing factor(s)
    #
    if hRefmethod.lower() == "fixed":
        hList = [fixedHRef for eachItem in uniqueValues]
    else:
        hList = GetSmoothingFactors(subsetIdentifier, uniqueValues,
                                    locationLayer, hRefmethod, modifier,