def RotateFeatureClass(inputFC, outputFC, angle=0, pivot_point=None):
    """Rotate Feature Class

    inputFC     Input features (point, polyline, or polygon)
    outputFC    Output feature class
    angle       Angle to rotate, in degrees (clockwise, matching
                Rotate_management)
    pivot_point X,Y coordinates (as space-separated string)
                Default is lower-left of inputFC's extent

    Returns the pivot point actually used as an "X Y" string, or None
    if it could not be determined.

    As the output feature class no longer has "real" xy locations
    after rotation, it has no coordinate system defined.
    """
    def RotateXY(x, y, xc=0, yc=0, angle=0, units="DEGREES"):
        """Rotate an xy coordinate about a specified origin

        x,y     xy coordinates
        xc,yc   center of rotation
        angle   angle
        units   "DEGREES" (default) or "RADIANS"
        """
        import math
        x = x - xc
        y = y - yc
        # negate so a positive angle rotates clockwise (like Rotate_management)
        angle = angle * -1
        if units == "DEGREES":
            angle = math.radians(angle)
        xr = (x * math.cos(angle)) - (y * math.sin(angle)) + xc
        yr = (x * math.sin(angle)) + (y * math.cos(angle)) + yc
        return xr, yr

    # temp names for cleanup
    env_file = None
    lyrFC, lyrTmp, lyrOut = [None] * 3  # layers
    tmpFC = None  # temp dataset
    Row, Rows, oRow, oRows = [None] * 4  # cursors

    try:
        # process parameters
        try:
            xcen, ycen = [float(xy) for xy in pivot_point.split()]
            pivot_point = xcen, ycen
        except (AttributeError, ValueError):
            # pivot point was not specified (None -> AttributeError) or was
            # unparseable (ValueError); fall back to the lower-left corner
            # of the input feature class extent
            ext = arcpy.Describe(inputFC).extent
            xcen, ycen = ext.XMin, ext.YMin
            pivot_point = xcen, ycen

        angle = float(angle)

        # save current GP settings so they can be restored in finally
        env_file = arcpy.CreateScratchName("xxenv", ".xml", "file",
                                           os.environ["TEMP"])
        arcpy.SaveSettings(env_file)

        # Disable any GP environment clips or project on the fly
        # (the original code issued these two calls twice; once is enough)
        arcpy.ClearEnvironment("extent")
        arcpy.ClearEnvironment("outputCoordinateSystem")

        # choose a workspace: current env, else the output's folder,
        # else the input's folder
        WKS = env.workspace
        if not WKS:
            if os.path.dirname(outputFC):
                WKS = os.path.dirname(outputFC)
            else:
                WKS = os.path.dirname(arcpy.Describe(inputFC).catalogPath)
        env.workspace = env.scratchWorkspace = WKS

        # get feature class properties
        lyrFC = "lyrFC"
        arcpy.MakeFeatureLayer_management(inputFC, lyrFC)
        dFC = arcpy.Describe(lyrFC)
        shpField = dFC.shapeFieldName
        shpType = dFC.shapeType
        FID = dFC.OIDFieldName

        # create temp feature class
        tmpFC = arcpy.CreateScratchName("xxfc", "", "featureclass")
        arcpy.CreateFeatureclass_management(os.path.dirname(tmpFC),
                                            os.path.basename(tmpFC), shpType)
        lyrTmp = "lyrTmp"
        arcpy.MakeFeatureLayer_management(tmpFC, lyrTmp)

        # set up id field (used to join attributes back later)
        TFID = "XXXX_FID"
        arcpy.AddField_management(lyrTmp, TFID, "LONG")
        arcpy.DeleteField_management(lyrTmp, "ID")

        # rotate the feature class coordinates;
        # only points, polylines, and polygons are supported

        # open read and write cursors
        Rows = arcpy.SearchCursor(lyrFC, "", "", "%s;%s" % (shpField, FID))
        oRows = arcpy.InsertCursor(lyrTmp)
        if shpType == "Point":
            for Row in Rows:
                shp = Row.getValue(shpField)
                pnt = shp.getPart()
                pnt.X, pnt.Y = RotateXY(pnt.X, pnt.Y, xcen, ycen, angle)
                oRow = oRows.newRow()
                oRow.setValue(shpField, pnt)
                oRow.setValue(TFID, Row.getValue(FID))
                oRows.insertRow(oRow)
        elif shpType in ["Polyline", "Polygon"]:
            parts = arcpy.Array()
            rings = arcpy.Array()
            ring = arcpy.Array()
            for Row in Rows:
                shp = Row.getValue(shpField)
                p = 0
                for part in shp:
                    for pnt in part:
                        if pnt:
                            x, y = RotateXY(pnt.X, pnt.Y, xcen, ycen, angle)
                            ring.add(arcpy.Point(x, y, pnt.ID))
                        else:
                            # a null point separates rings; if we have a
                            # ring, save it
                            if len(ring) > 0:
                                rings.add(ring)
                                ring.removeAll()
                    # we have our last ring, add it
                    rings.add(ring)
                    ring.removeAll()
                    # if only one ring, remove nesting
                    if len(rings) == 1: rings = rings.getObject(0)
                    parts.add(rings)
                    rings.removeAll()
                    p += 1

                # if only one part, remove nesting
                if len(parts) == 1: parts = parts.getObject(0)
                if dFC.shapeType == "Polyline":
                    shp = arcpy.Polyline(parts)
                else:
                    shp = arcpy.Polygon(parts)
                parts.removeAll()
                oRow = oRows.newRow()
                oRow.setValue(shpField, shp)
                oRow.setValue(TFID, Row.getValue(FID))
                oRows.insertRow(oRow)
        else:
            raise Exception("Shape type {0} is not supported".format(shpType))

        del oRow, oRows  # close write cursor (ensure buffer written)
        oRow, oRows = None, None  # restore variables for cleanup

        # join attributes, and copy to output
        arcpy.AddJoin_management(lyrTmp, TFID, lyrFC, FID)
        env.qualifiedFieldNames = False
        arcpy.Merge_management(lyrTmp, outputFC)
        lyrOut = "lyrOut"
        arcpy.MakeFeatureLayer_management(outputFC, lyrOut)
        # drop temp fields 2,3 (TFID, FID)
        fnames = [f.name for f in arcpy.ListFields(lyrOut)]
        dropList = ";".join(fnames[2:4])
        arcpy.DeleteField_management(lyrOut, dropList)

    except MsgError as xmsg:
        arcpy.AddError(str(xmsg))
    except arcpy.ExecuteError:
        tbinfo = traceback.format_tb(sys.exc_info()[2])[0]
        arcpy.AddError(tbinfo.strip())
        arcpy.AddError(arcpy.GetMessages())
        numMsg = arcpy.GetMessageCount()
        for i in range(0, numMsg):
            arcpy.AddReturnMessage(i)
    except Exception as xmsg:
        tbinfo = traceback.format_tb(sys.exc_info()[2])[0]
        arcpy.AddError(tbinfo + str(xmsg))
    finally:
        # reset environment
        if env_file: arcpy.LoadSettings(env_file)
        # Clean up temp files
        for f in [lyrFC, lyrTmp, lyrOut, tmpFC, env_file]:
            try:
                if f: arcpy.Delete_management(f)
            except Exception:
                pass
        # delete the cursor objects themselves to release locks; the
        # original looped `for c in [...]: del c`, which only deleted the
        # loop variable and left all four cursors alive
        try:
            del Row, Rows, oRow, oRows
        except Exception:
            pass

        # return pivot point as an "X Y" string (None if never set);
        # NOTE: returning from finally deliberately suppresses any pending
        # exception -- errors were already reported via arcpy.AddError above
        try:
            pivot_point = "{0} {1}".format(*pivot_point)
        except Exception:
            pivot_point = None

        return pivot_point
Ejemplo n.º 2
0
import arcpy
arcpy.env.workspace = "D:\\TOPRAK\\3"
# table driving the loop; presumably one record per .mdb, with the AD
# field supplying each database's name -- TODO confirm against the data
featureClass = r"V:\3-CALISMALAR\01_SEMIH\00_TOPRAK_KAYNAKLARI\MDB3\outgdb.dbf"
rows = arcpy.SearchCursor(featureClass)
# walk the first 99 records of the table, one personal GDB each
for semsem in range(1, 100, 1):
    row = rows.next()
    exname = row.AD + ".mdb"
    ex = row.AD
    girdi = "D:\\TOPRAK\\3"
    # path to the land-cover layer inside this record's .mdb
    arazi = girdi + "\\" + exname + "\\Arazi_Katmani"
    features = arcpy.UpdateCursor(arazi)
    # recode numeric ana_sinif codes to labelled class names, in place
    for feature in features:
        if feature.ana_sinif == "7":
            feature.ana_sinif = "6_Orman"
            features.updateRow(feature)
        if feature.ana_sinif == "8":
            feature.ana_sinif = "6_Mezarlik"
            features.updateRow(feature)
        if feature.ana_sinif == "9":
            feature.ana_sinif = "6_Aks"
            features.updateRow(feature)
        if feature.ana_sinif == "10":
            feature.ana_sinif = "6_Gol"
            features.updateRow(feature)
        if feature.ana_sinif == "11":
            feature.ana_sinif = "6_Yer"
            features.updateRow(feature)
        if feature.ana_sinif == "12":
            feature.ana_sinif = "6_Yol"
            features.updateRow(feature)
        # NOTE(review): snippet is truncated here -- the "13" branch body
        # is missing from this excerpt
        if feature.ana_sinif == "13":
Ejemplo n.º 3
0
def createComplainRasterFeature(SelectSQL, InputComplainFeatures, POIFeatures,
                                FinalResultFeature):
    logging.info("Process: 创建" + FinalResultFeature)
    if (arcpy.Exists(FinalResultFeature)):
        arcpy.Delete_management(FinalResultFeature, "FeatureClass")
    rmNo = random.randint(100000000, 999999999)
    print rmNo
    # Process: 筛选
    print "Process: 筛选"
    logging.info("Process: 筛选")
    FeatureSelect = arcpy.Select_analysis(
        InputComplainFeatures, "in_memory/FeatureSelect" + repr(rmNo),
        SelectSQL)
    # Process: 点转栅格
    print FeatureSelect
    rowSear = arcpy.SearchCursor(FeatureSelect)
    row = rowSear.next()
    if (row):
        print "Process: 点转栅格"
        logging.info("Process: 点转栅格")
        tempEnvironment0 = arcpy.env.extent
        arcpy.env.extent = "115 23 122 29"
        ResultRaster = arcpy.PointToRaster_conversion(
            FeatureSelect, "OBJECTID", "in_memory/ResultRaster" + repr(rmNo),
            "COUNT", "NONE", ".0018")
        arcpy.env.extent = tempEnvironment0
        # Process: 栅格转点
        print "Process: 栅格转点"
        logging.info("Process: 栅格转点")
        COMPLAIN_RASTER_POINTS = arcpy.RasterToPoint_conversion(
            ResultRaster, "in_memory/COMPLAIN_RASTER_POINTS" + repr(rmNo),
            "VALUE")
        print "Process: 空间连接"
        # Process: 空间连接
        COMPLAIN_POI_UNION = arcpy.SpatialJoin_analysis(
            COMPLAIN_RASTER_POINTS, POI,
            "in_memory/COMPLAIN_POI_UNION" + repr(rmNo), "JOIN_ONE_TO_ONE",
            "KEEP_ALL", "", "CLOSEST", ".1 DecimalDegrees", "DISTANCE")
        print "Process: 点转栅格 (2)"
        logging.info("Process: 点转栅格 (2)")
        # Process: 点转栅格 (2)
        tempEnvironment0 = arcpy.env.extent
        arcpy.env.extent = "115 23 122 29"
        ResultRaster2 = arcpy.PointToRaster_conversion(
            COMPLAIN_POI_UNION, "OBJECTID",
            "in_memory/ResultRaster2" + repr(rmNo), "MOST_FREQUENT", "NONE",
            ".0018")
        arcpy.env.extent = tempEnvironment0
        print "Process: 栅格转面"
        logging.info("Process: 栅格转面")
        # Process: 栅格转面
        ResultFeature = arcpy.RasterToPolygon_conversion(
            ResultRaster2, "in_memory/ResultFeature" + repr(rmNo),
            "NO_SIMPLIFY", "VALUE")
        print "Process: 空间连接 (2)"
        logging.info("Process: 空间连接 (2)")
        # Process: 空间连接 (2)
        ResultFeatureZj = arcpy.SpatialJoin_analysis(
            ResultFeature, COMPLAIN_POI_UNION,
            "in_memory/ResultFeatureZj" + repr(rmNo), "JOIN_ONE_TO_ONE",
            "KEEP_ALL", "", "INTERSECT", "", "")
        # Process: 空间连接 (3)
        arcpy.SpatialJoin_analysis(FeatureSelect, ResultFeatureZj,
                                   FinalResultFeature, "JOIN_ONE_TO_ONE",
                                   "KEEP_ALL", "", "INTERSECT", "", "")
        #arcpy.SpatialJoin_analysis(FeatureSelect, ResultFeatureZj, FinalResultFeature, "JOIN_ONE_TO_ONE", "KEEP_ALL", "TIME \"TIME\" true true false 8 Date 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\GIS_OBJECT_COMPLAIN_Select1,TIME,-1,-1;WORK_ORDER_ID \"WORK_ORDER_ID\" true true false 100 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\GIS_OBJECT_COMPLAIN_Select1,WORK_ORDER_ID,-1,-1;DISTANCE \"DISTANCE\" true true false 8 Double 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,DISTANCE,-1,-1;POINTID \"POINTID\" true true false 4 Long 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,POINTID,-1,-1;GRID_CODE \"聚合数\" true true false 4 Long 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,GRID_CODE,-1,-1;Name \"聚合地址\" true true false 160 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,Name,-1,-1;Ctype \"聚合地址类型(原始)\" true true false 64 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,Ctype,-1,-1;CnType \"聚合地址类型\" true true false 50 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,CnType,-1,-1;CITY \"地市\" true true false 32 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,CITY,-1,-1;COUNTY \"区县\" true true false 32 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,COUNTY,-1,-1;GRID \"GRID\" true true false 32 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,GRID,-1,-1;SGLON \"栅格POI经度\" true true false 8 Double 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,SGLON,-1,-1;SGLAT \"栅格POI纬度\" true true false 8 Double 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,SGLAT,-1,-1;CQ_REGION \"城区网格所属区域\" true true false 60 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,CQ_REGION,-1,-1;CQ_REGION_TYPE \"城区网格区域属性\" true true false 60 Text 0 0 
,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,CQ_REGION_TYPE,-1,-1;TEST_ID \"测试网格ID\" true true false 10 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,TEST_ID,-1,-1;TEST_GRIDID \"测试网格编号\" true true false 20 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,TEST_GRIDID,-1,-1;TEST_CLASS \"测试网格类型\" true true false 10 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,TEST_CLASS,-1,-1", "INTERSECT", "", "")

        arcpy.Delete_management(COMPLAIN_POI_UNION)
        arcpy.Delete_management(COMPLAIN_RASTER_POINTS)
        arcpy.Delete_management(ResultRaster)
        arcpy.Delete_management(ResultRaster2)
        arcpy.Delete_management(ResultFeature)
        arcpy.Delete_management(ResultFeatureZj)
        del COMPLAIN_POI_UNION, COMPLAIN_RASTER_POINTS, ResultRaster, ResultRaster2, ResultFeature, ResultFeatureZj
    arcpy.Delete_management(FeatureSelect)
    del FeatureSelect, rowSear
    logging.info("清理内存~~")
    gc.collect()
Ejemplo n.º 4
0
#-------------------------------------------------------------------------------
#!/usr/bin/env python

import arcpy
from datetime import datetime

#workspc = arcpy.GetParameterAsText(0)
output = arcpy.GetParameterAsText(0)

#arcpy.env.workspace = workspc
arcpy.env.overwriteOutput = "True"

# source tables: incident records and the agency lookup table
fc3 = "Incident_Information"
fc2 = "Lead Agency"

rows = arcpy.SearchCursor(fc3)
row = rows.next()
arcpy.AddMessage("Get Incident Info")

# for each incident, look up the matching Lead Agency record by name
while row:
    # you need to insert correct field names in your getvalue function
    LeadAgency = row.getValue("Lead_Agency")
    where2 = '"Lead_Agency" = ' + "'" + LeadAgency + "'"
    arcpy.AddMessage(where2)
    rows2 = arcpy.SearchCursor(fc2, where2)
    row2 = rows2.next()

    # defaults used when no matching agency record supplies them
    Phone = 'none'
    email = 'none'

    # NOTE(review): snippet is truncated here -- the inner loop body
    # is missing from this excerpt
    while row2:
Ejemplo n.º 5
0
def __updateEntityAttributes(fc, fldList, dom, logFile):
    """For each attribute (field) in fldList,
        adds attribute definition and definition source,
        classifies as range domain, unrepresentable-value domain or enumerated-value domain, and
            for range domains, adds rangemin, rangemax, and units;
            for unrepresentable value domains, adds unrepresentable value statement;
            for enumerated value domains:
            1) Finds all controlled-vocabulary fields in the table sent to it
            2) Builds a set of unique terms in each field, ie, the domain
            3) Matches each domain value to an entry in the glossary
            4) Builds a dictionary of term:(definition, source) items
            5) Takes the dictionary items and put them into the metadata
              document as Attribute_Domain_Values
        Field MapUnit in table DescriptionOfMapUnits is treated as a special case.

        Returns the updated dom plus the set of data-source IDs seen.
        Missing terms/values are appended to logFile.
        """
    cantfindTerm = []   # fields with no definition available anywhere
    cantfindValue = []  # (field, value) pairs missing from the glossary
    dataSourceValues = []
    fcShortName = os.path.basename(fc)
    for fld in fldList:
        addMsgAndPrint('      Field: ' + fld)
        # if is _ID field or if field definition is available or if OBJECTID, update definition
        # (dict.has_key() replaced with `in` throughout: has_key was removed in Python 3)
        if fld.find('_ID') > -1 or fld in attribDict or fld == 'OBJECTID':
            dom = __updateAttrDef(fld, dom)
        else:
            if fld not in EsriDefinedAttributes:
                cantfindTerm.append(fld)
        #if this is an _ID field
        if fld.find('_ID') > -1:
            dom = __updateUdom(fld, dom, unrepresentableDomainDict['_ID'])
        #or if this is another unrepresentable-domain field
        elif fld in unrepresentableDomainDict:
            dom = __updateUdom(fld, dom, unrepresentableDomainDict[fld])
        #or if this is a defined range-domain field
        elif fld in rangeDomainDict:
            dom = __updateRdom(fld, dom)
        #or if this is MapUnit in DMU
        elif fld == 'MapUnit' and fc == 'DescriptionOfMapUnits':
            dom = __updateUdom(fld, dom, unrepresentableDomainDict['default'])
        #if this is a defined Enumerated Value Domain field
        elif fld in enumeratedValueDomainFieldList:
            if debug:
                addMsgAndPrint(
                    'this is a recognized enumeratedValueDomainField')

            #create a search cursor on the field
            rows = arcpy.da.SearchCursor(fc, fld)
            # and get a list of all values
            valList = [row[0] for row in rows if row[0] is not None]
            #uniquify the list by converting it to a set object
            valList = set(valList)

            #create an empty dictionary object to hold the matches between the unique terms
            #and their definitions (grabbed from the glossary)
            defs = {}
            #for each unique term, try to create a search cursor of just one record where the term
            #matchs a Term field value from the glossary
            # (`<>` replaced with `!=` throughout: `<>` was removed in Python 3)
            if fld == 'MapUnit' and fc != 'DescriptionOfMapUnits':
                for t in valList:
                    query = '"MapUnit" = \'' + t + '\''
                    rows = arcpy.SearchCursor(DMU, query)
                    row = rows.next()
                    #if the searchcursor contains a row
                    if row:
                        #create an entry in the dictionary of term:[definition, source] key:value pairs
                        #this is how we will enumerate through the enumerated_domain section
                        defs[t] = []
                        if row.FullName is not None:
                            defs[t].append(row.FullName.encode('utf_8'))
                            defs[t].append(
                                'this report, table DescriptionOfMapUnits')
                        else:
                            addMsgAndPrint('MapUnit = ' + t +
                                           ', FullName not defined')
                            defs[t].append(row.Name.encode('utf_8'))
                            defs[t].append(
                                'this report, table DescriptionOfMapUnits')
                    else:
                        if not t in ('', ' '): cantfindValue.append([fld, t])

            elif fld == 'GeoMaterialConfidence' and fc == 'DescriptionOfMapUnits':
                if debug:
                    addMsgAndPrint('DMU / GeoMaterialsConfidence')
                defs = GeoMatConfDict
            elif fld == 'GeoMaterial' and fc == 'DescriptionOfMapUnits':
                if debug:
                    addMsgAndPrint('DMU / GeoMaterials!')
                for t in valList:
                    query = '"GeoMaterial" = \'' + t + '\''
                    if debug:
                        addMsgAndPrint('query=' + query)
                    rows = arcpy.SearchCursor(gmDict, query)
                    row = rows.next()
                    #if the searchcursor contains a row
                    if row:
                        if debug:
                            addMsgAndPrint(row.GeoMaterial + ' : ' +
                                           row.Definition.encode('utf_8'))
                        #create an entry in the dictionary of term:[definition, source] key:value pairs
                        #this is how we will enumerate through the enumerated_domain section
                        defs[t] = []
                        defs[t].append(row.Definition.encode('utf_8'))
                        defs[t].append(' GeMS documentation')
                    else:
                        addMsgAndPrint('GeoMaterial = ' + t +
                                       ': not defined in GeoMaterialDict')
                        cantfindValue.append([fld, t])

            elif fld.find('SourceID') > -1:  # is a source field
                for t in valList:
                    if debug:
                        addMsgAndPrint('Field ' + fld + ', appending ' + t +
                                       ' to dataSourceValues')
                    dataSourceValues.append(t)
                    query = '"DataSources_ID" = \'' + t + '\''
                    rows = arcpy.SearchCursor(dataSources, query)
                    row = rows.next()
                    #if the searchcursor contains a row
                    if row:
                        #create an entry in the dictionary of term:[definition, source] key:value pairs
                        #this is how we will enumerate through the enumerated_domain section
                        defs[t] = []
                        defs[t].append(row.Source.encode('utf_8'))
                        defs[t].append('this report, table DataSources')
                    else:
                        cantfindValue.append([fld, t])
            else:
                # generic case: look the term up in the glossary table
                for t in valList:
                    query = '"Term" = ' + "'" + t + "'"
                    if debug:
                        addMsgAndPrint('query=' + query)
                    rows = arcpy.SearchCursor(gloss, query)
                    row = rows.next()
                    #if the searchcursor contains a row
                    if row:
                        #create an entry in the dictionary of term:[definition, source] key:value pairs
                        #this is how we will enumerate through the enumerated_domain section
                        defs[t] = []
                        defs[t].append(row.Definition.encode('utf_8'))
                        defs[t].append(
                            __findInlineRef(
                                row.DefinitionSourceID).encode('utf_8'))
                    else:
                        if fld != 'GeoMaterial' and fc != 'GeoMaterialDict':
                            cantfindValue.append([fld, t])
            dom = __updateEdom(fld, defs, dom)
        else:  #presumed to be an unrepresentable domain
            dom = __updateUdom(fld, dom, unrepresentableDomainDict['default'])
    # report anything we could not match, for later manual fixing
    if len(cantfindValue) > 0:
        logFile.write('Missing enumerated-domain values\n')
        logFile.write('  ENTITY     TERM     VALUE\n')
        for term in cantfindValue:
            logFile.write('  ' + fcShortName + '  ' + term[0] + ' **' +
                          term[1] + '**\n')
    if len(cantfindTerm) > 0:
        logFile.write('Missing terms\n')
        logFile.write('  ENTITY     TERM\n')
        for term in cantfindTerm:
            logFile.write('  ' + fcShortName + '  ' + term + '\n')
    dataSourceValues = set(dataSourceValues)
    return dom, dataSourceValues
Ejemplo n.º 6
0
    # buffer the HNG lines; "ALL" dissolves the result into one feature
    arcpy.Buffer_analysis(HNG_Lines, HNG_Lines_Buffer, HNG_Buffer_Width,
                          "FULL", "ROUND", "ALL")

    # Intersect each buffered company layer with the counties layer
    print "Intersecting " + AP_Lines_Buffer + " with " + Counties + "..."
    arcpy.Intersect_analysis([AP_Lines_Buffer, Counties], AP_Lines_Intersect)
    print "Intersecting " + HNG_Lines_Buffer + " with " + Counties + "..."
    arcpy.Intersect_analysis([HNG_Lines_Buffer, Counties], HNG_Lines_Intersect)

    print

    # Select each feature out of the intersected layers to create the final batch of layers
    working_folders = []

    print "Pulling out each feature in " + AP_Lines_Intersect + "..."
    AP_features = arcpy.SearchCursor(AP_Lines_Intersect)
    for AP_feature in AP_features:
        # one folder + shapefile per feature name, stamped with the run date
        shapefilename = "AP_Buffer_" + AP_feature.Name + "_" + formattedDate
        newdestination = destination + shapefilename + "\\"
        os.mkdir(newdestination)
        filepath = newdestination + shapefilename + ".shp"
        arcpy.Select_analysis(AP_Lines_Intersect, filepath,
                              '"Name" = \'' + AP_feature.Name + '\'')
        # Created folder for layer

        # zip everything that was written into the new folder
        zipped = zipfile.ZipFile(destination + shapefilename + ".zip", "w")
        for name in glob.glob(newdestination + "\*"):
            zipped.write(name, os.path.basename(name), zipfile.ZIP_DEFLATED)
        zipped.close()
        print "-Wrote to zip file " + shapefilename + ".zip"
        # Wrote folder to zip
Ejemplo n.º 7
0
def identical(SDE_FC_test, target_Display, sql, GID_bool):

    # Setup search cursors to create 2 lists for global ID values
    # Establish empty lists
    sql = "\"strCCEProjectNumber\"" + " = '" + cceNo + "'"
    fields = ["GlobalID", "PageNo", "SDEID"]
    GID_List1 = []  #; print GID_List1
    GID_List2 = []  #; print GID_List2
    u_List = []  #; print u_List
    i_List = []  #; print i_List
    u_bool = False
    i_bool = False
    SDE_Value = "GlobalID"
    if GID_bool == True:
        SDE_Value = "SDEID"

    # Start the for loop to cycle through all the rows and populate Global ID lists to compare
    s_GID_List1 = arcpy.SearchCursor(SDE_FC_test, "", "", "", "")
    print "Search cursor set... for Global ID 1"
    s_row1 = s_GID_List1.next()

    while s_row1:
        r = s_row1.getValue(SDE_Value)
        GID_List1.append(r)
        s_row1 = s_GID_List1.next()
    print "This is  Global ID list for Base Data... "
    print GID_List1
    del s_GID_List1

    # Second Search Loop
    s_GID_List2 = arcpy.SearchCursor(target_Display, sql, "", "", "")
    print "Search cursor set... for Global ID 2"
    s_row2 = s_GID_List2.next()

    while s_row2:
        s = s_row2.getValue("SDEID")
        GID_List2.append(s)
        s_row2 = s_GID_List2.next()

    print "This is  Global ID list for Target... "
    print GID_List2
    del s_GID_List2

    # Start loop to compare the Global ID lists and append to appropriate list for either update or insert
    ln = len(GID_List2)
    print ln
    for x in GID_List1:
        if x in GID_List2:
            u_List.append(x)
            u_bool = True
        else:
            i_List.append(x)
            i_bool = True

    print "This is the Insert List... "
    print i_List
    print "This is the Update List... "
    print u_List

    mainFunction(SDE_FC_test, target_Display, u_bool, i_bool, u_List, i_List,
                 sql, GID_bool)
Ejemplo n.º 8
0
# Count meters for validation purposes
metercount = int(str(arcpy.GetCount_management(meters)))

print "Setup complete."
print

# Now the loop to count meters inside each feature in each layer, and calculate "Meters_Inside"
for layer in layers:
    # one CSV per layer ('wb' is Python-2 binary mode required by csv)
    writer = csv.writer(open(filepath + layer + ".csv", 'wb'), dialect='excel')
    writer.writerow(["Area", "Count"])
    total = 0
    rows = []
    print "Counting meters in \"" + layer + "\"..."
    layer += layersuffix
    features = arcpy.SearchCursor(layer)
    for feature in features:
        # select just this feature, then select the meters falling inside it
        arcpy.SelectLayerByAttribute_management(
            layer, "NEW_SELECTION", '"OBJECTID" = ' + str(feature.OBJECTID))
        arcpy.SelectLayerByLocation_management(meters, "WITHIN", layer)
        count = arcpy.GetCount_management(meters)
        count = int(str(count))
        # write this feature's meter count into the layer's field
        arcpy.CalculateField_management(layer, field, count, "PYTHON", "")
        print str(count) + " meters in " + ReturnDesignator(feature, layer)
        writer.writerow([ReturnDesignator(feature, layer), count])
        total += count
    print
    print str(total) + " meters accounted for and " + str(
        metercount) + " total meters"
    # sanity check: per-feature counts should sum to the overall meter count
    if total == metercount:
        print "All meters accounted for!"
Ejemplo n.º 9
0
        arcpy.AddMessage("Training Classifier")
        # train an ISO cluster classifier on the "Comp" composite raster
        TrainIsoClusterClassifier(
            "Comp", 10,
            "U:\Geospatial_Software_Final_Projects\ArcPy_Georeferencing\ClassDef.ecd",
            "#", 20, 20, 1)
        Classified = ClassifyRaster(
            "Comp",
            "U:\Geospatial_Software_Final_Projects\ArcPy_Georeferencing\ClassDef.ecd"
        )

        # Reclassify raster in order to recieve a count of pixels in each class
        ReclassForCount = Reclassify(Classified, "Value", RemapValue([[0, 0]]))

        # Select all pixels in class with fewest pixels and export only those pixels to a new raster layer
        # Sort the attribute table on the basis of the pixel count
        attributeTable = arcpy.SearchCursor(ReclassForCount)
        # running minimum, seeded with a sentinel larger than any real count
        Count = 1000000000000
        for nextRecord in attributeTable:
            Count1 = nextRecord.getValue("Count")
            arcpy.AddMessage(str(Count))
            arcpy.AddMessage(str(Count1))
            if Count1 < Count:
                Count = Count1
        arcpy.MakeRasterLayer_management(ReclassForCount, "ReclassForCountlyr")
        selectionQuery = '"' + "Count" + '"' + " = " + str(Count)
        # NOTE(review): selectionQuery is not passed to this SelectLayerBy-
        # Attribute call (it is only used by MakeRasterLayer below) -- confirm
        # this call is doing what was intended
        arcpy.SelectLayerByAttribute_management("ReclassForCountlyr",
                                                "NEW_SELECTION")
        arcpy.MakeRasterLayer_management("ReclassForCountlyr", "FewestPixels",
                                         selectionQuery)
        arcpy.CopyRaster_management("FewestPixels", "FewestPixelsDataset")
    # build an output polygon feature class in the polylines' spatial ref
    sr = arcpy.Describe(inputPolylines).spatialReference
    arcpy.AddMessage("Creating output feature class...")
    arcpy.CreateFeatureclass_management(os.path.dirname(outputPolygons),
                                        os.path.basename(outputPolygons),
                                        "POLYGON", "#", "#", "#", sr)

    #Add ID field
    arcpy.AddMessage("Adding ID field ...")
    arcpy.AddField_management(outputPolygons, inputIDFieldName, "LONG")

    ellipseCount = arcpy.GetCount_management(inputPolylines)

    arcpy.AddMessage("Opening cursors ...")
    #Open Search cursor on polyline
    lineShapeFieldName = arcpy.Describe(inputPolylines).shapeFieldName
    inRows = arcpy.SearchCursor(inputPolylines)
    #Open Insert cursor on polygons
    outRows = arcpy.InsertCursor(outputPolygons, sr)

    # copy each polyline's geometry into a new polygon row
    for inRow in inRows:
        #Read polyline geometry as point collection
        inFeat = inRow.getValue(lineShapeFieldName)
        #Get ID
        inID = inRow.getValue(inputIDFieldName)
        arcpy.AddMessage("Building ellipse " + str(inID + 1) + " of " +
                         str(ellipseCount) + " from row " + str(inID))
        if debug == True: arcpy.AddMessage(str(inID))
        #Set point collection to polygon feature
        outFeat = outRows.newRow()
        outFeat.shape = inFeat
        #Set ID (NOTE(review): snippet is truncated here)
Ejemplo n.º 11
0
# clear out shade features
print "\n" + "Deleting masks features"
arcpy.DeleteFeatures_management(masksFC)

## select 00 shades area
#print "\n" + "Selecting 00 shade area..."
#arcpy.SelectLayerByAttribute_management(shadesLayer, "NEW_SELECTION", fldAREA_NUM + " = '00'")
#ReportResult()

# get shades UpdateCursor
print "\n" + "Getting masks update cursor..."
masksInCur = arcpy.InsertCursor(masksFC)

# get cursor of all public works areas
print "\n" + "Getting counties cursor..."
countiesCur = arcpy.SearchCursor(countiesFC)

# loop through areas
print "\n" + "Looping through counties..."
for county in countiesCur:
    # get county number
    county_num = county.getValue(fldNum)

    # select area
    print "\n" + "Selecting County: " + county_num
    arcpy.SelectLayerByAttribute_management(countiesLayer, "NEW_SELECTION",
                                            fldNum + " = '" + county_num + "'")

    # run erase tool
    print "\n" + "Running Erase Tool..."
    # NOTE(review): snippet is truncated here -- the Exists branch body
    # is missing from this excerpt
    if arcpy.Exists(tempFC):
Ejemplo n.º 12
0
	arcpy.Project_management("SHEDSmerge", "catchesProj", outputCoord)
	#create feature layer from EBTJV patch shapefile
	arcpy.MakeFeatureLayer_management("catchesProj", "catchLyr")
	#select catchments that intersect patches-- can specify "within", "completely within", or "have center in"
	arcpy.SelectLayerByLocation_management("catchLyr",selectType2,patchSub)
	##specify output file
	SHEDSout=os.path.join(outputDir,"Catchments_"+subsetFileName+".shp")
	#save shapefile
	arcpy.CopyFeatures_management("catchLyr", SHEDSout)
	#print output location
	arcpy.AddMessage("SHEDS Catchment File saved to: Catchments_%s.shp"%subsetFileName)
	
	##get catchment IDs in subsetted file for subsetting SHEDS data files
	catchIDs=[]
	#create search cursor
	rows=arcpy.SearchCursor(SHEDSout)
	##for each catchment in spatial join
	for row in rows:
		#get catchID and patchID
		catchIDs.append(int(float(row.getValue("featureid"))))
	del row, rows
	end = time()
	arcpy.AddMessage("Finished in "+str(round(((end-start)/60),2))+" Minutes")
	
#############################################################################################
##           	                SHEDS local/upstream data files                            ##
#############################################################################################
if shedsFilesLoc != "#":
	arcpy.AddMessage("\nStarting SHEDS local/upstream data subset..")
	#store in list
	shedsFilesList=shedsFilesLoc.split(";")
Ejemplo n.º 13
0
def streamcrosswalk(fc, HUC2_Lowr48, LYR_dir, outlocation, HUCdict, NoNHD,
                    csvpath, spec_to_include, name_dir):
    arcpy.MakeFeatureLayer_management(HUC2_Lowr48, "HUC48_lyr")
    for row in arcpy.SearchCursor(fc):
        filename = row.filename
        entid = row.EntityID
        if entid not in spec_to_include:
            continue

        sp_group = fc.split("_")[1]

        sp_gdb = sp_group + '_' + extractfiles + '.gdb'
        out_gdb = out_location + os.sep + sp_gdb
        CreateGDB(outlocation, sp_gdb, out_gdb)

        filename_new = filename.replace('_catchment', '')
        if filename_new.endswith('HUC12'):
            filename_new = filename_new
        else:
            filename_new = filename_new + "_" + extractfiles

        outfc = out_gdb + os.sep + filename_new
        out_layer = LYR_dir + os.sep + entid + ".lyr"

        # if not os.path.exists(out_layer):
        if not arcpy.Exists(outfc):
            whereclause = "EntityID = '%s'" % (entid)
            print whereclause
            arcpy.MakeFeatureLayer_management(fc, "lyr", whereclause)
            print "Creating layer {0}".format(entid)
            arcpy.SaveToLayerFile_management("lyr", out_layer, "ABSOLUTE")
            print "Saved layer file"

            spec_location = str(out_layer)

            arcpy.SelectLayerByLocation_management("HUC48_lyr", "INTERSECT",
                                                   spec_location)

            arcpy.MakeFeatureLayer_management("HUC48_lyr", "slt_lyr")
            with arcpy.da.SearchCursor("slt_lyr", HUC2Field) as cursor:
                HUC2list = sorted({row[0] for row in cursor})
                print HUC2list

            # for each value in the HUC2 set will select all HUC12 that are with species file, and save to a master
            # species HUC12 fc, one table per species will include all values

            # NOTE NOTE if the script is stop and an species that was started but not completed for a species it
            # must be deleted before starting the script again.If a table has been created the script will
            # move to the next species
            counter = 0
            for z in HUC2list:
                print z
                huc_fc = HUCdict.get(z)
                print huc_fc
                if huc_fc is not None:
                    arcpy.Delete_management("huc_lyr")
                    arcpy.MakeFeatureLayer_management(huc_fc, "huc_lyr")

                    if filename.endswith('HUC12'):
                        arcpy.SelectLayerByLocation_management(
                            "huc_lyr", "HAVE_THEIR_CENTER_IN", out_layer)
                    else:
                        arcpy.SelectLayerByLocation_management(
                            "huc_lyr", "INTERSECT", out_layer)
                    count = arcpy.GetCount_management("huc_lyr")
                    print str(count) + " selected features"

                    if count < 1:
                        print 'Zero'
                        if entid not in noNHD:
                            NoNHD.append(entid)
                        continue
                    if counter == 0:
                        if count != 0:
                            print outfc
                            arcpy.CopyFeatures_management("huc_lyr", outfc)
                            print "exported: " + str(outfc)
                            counter += 1
                            if entid in noNHD:
                                NoNHD.remove(id)
                        continue
                    if counter > 0:
                        arcpy.Append_management("huc_lyr", outfc, "NO_TEST",
                                                "", "")
                        counter += 1
            print "FC {0} completed. Located at {1}".format(outfc, out_gdb)
            del row, HUC2list
        else:
            print "{0}".format(outfc) + " previously populated"
            continue

    arcpy.Delete_management("HUC48_lyr")
    create_outtable(NoNHD, csvpath)
    return NoNHD
Ejemplo n.º 14
0
    distanceval = None
    bearingval = None

    points = arcpy.GetParameterAsText(0)
    distancefield = arcpy.GetParameterAsText(1)
    bearingfield = arcpy.GetParameterAsText(2)
    datetimefield = arcpy.GetParameterAsText(3)
    spikefield = arcpy.GetParameterAsText(4)
    maxdistance = arcpy.GetParameterAsText(5)
    maxdeviation = arcpy.GetParameterAsText(6)

    maxdistance = float(maxdistance)
    maxdeviation = float(maxdeviation)

    features = arcpy.UpdateCursor(points, "", None, "", datetimefield + " A")
    findfeatures = arcpy.SearchCursor(points, "", None, "",
                                      datetimefield + " A")
    feature = features.next()
    nextfeature = findfeatures.next()

    while feature:
        nextfeature = findfeatures.next()
        distanceval = feature.getValue(distancefield)
        bearingval = feature.getValue(bearingfield)
        feature.setValue(spikefield, "false")
        if (distanceval > maxdistance):
            #unusually high distance - could be a spike if it's the last point or if the next point also shows a high distance (returning from the outlier)
            if nextfeature:
                nextdistanceval = nextfeature.getValue(distancefield)
            else:
                nextdistanceval = distanceval
            if (nextdistanceval > maxdistance):
Ejemplo n.º 15
0
                                      )  # get right angle (arithmetic)
    rightBearing = geoBearing + (traversal / 2.0
                                 )  # get right bearing (geographic)
    if rightBearing < 0.0: rightBearing = 360.0 + rightBearing

    if debug == True:
        arcpy.AddMessage("arithemtic left/right: " + str(leftAngle) + "/" +
                         str(rightAngle))
    if debug == True:
        arcpy.AddMessage("geo left/right: " + str(leftBearing) + "/" +
                         str(rightBearing))

    centerPoints = []
    arcpy.AddMessage("Getting centers ....")
    shapefieldname = arcpy.Describe(prjInFeature).ShapeFieldName
    rows = arcpy.SearchCursor(prjInFeature)
    for row in rows:
        feat = row.getValue(shapefieldname)
        pnt = feat.getPart()
        centerPointX = pnt.X
        centerPointY = pnt.Y
        centerPoints.append([centerPointX, centerPointY])
    del row
    del rows

    paths = []
    arcpy.AddMessage("Creating paths ...")
    for centerPoint in centerPoints:
        path = []
        centerPointX = centerPoint[0]
        centerPointY = centerPoint[1]
Ejemplo n.º 16
0
def agri_v1_geoprocessing(project_name,
                          nutrient,
                          location,
                          in_agri,
                          in_factors_crop,
                          in_factors_livestock,
                          out_gdb,
                          messages,
                          out_arable=None,
                          out_pasture=None):
    """Compute arable and pasture nutrient loads for a location of interest.

    Runs two parallel passes over the agri land-cover data: one applies
    crop export factors ('Arab_calc'/'Arab1calc' fields), the other applies
    livestock export factors ('Past_calc'/'Past1calc' fields). Each pass
    intersects the result with the location boundary and writes a feature
    class to out_gdb.

    :param project_name: name of the project that will be used to identify the outputs in the geodatabase [required]
    :type project_name: str
    :param nutrient: nutrient of interest {possible values: 'N' or 'P'} [required]
    :type nutrient: str
    :param location: path of the feature class for the location of interest [required]
    :type location: str
    :param in_agri: path of the input feature class of the land cover data [required]
    :type in_agri: str
    :param in_factors_crop: path of the input table of the export factors for crop types [required]
    :type in_factors_crop: str
    :param in_factors_livestock: path of the input table of the export factors for livestock types [required]
    :type in_factors_livestock: str
    :param out_gdb: path of the geodatabase where to store the output feature classes [required]
    :type out_gdb: str
    :param messages: object used for communication with the user interface [required]
    :type messages: instance of a class featuring a 'addMessage' method
    :param out_arable: path of the output feature class for arable load [optional]
    :type out_arable: str
    :param out_pasture: path of the output feature class for pasture load [optional]
    :type out_pasture: str
    :return: tuple (out_arable, out_pasture) -- the two output fc paths

    NOTE(review): 'sep' is presumably os.sep imported at module level --
    confirm; raises Exception when no factor row named
    '<nutrient>_factors' exists in either factors table.
    """

    # calculate load for arable
    messages.addMessage("> Calculating {} load for Arable.".format(nutrient))

    arcpy.MakeFeatureLayer_management(in_features=in_agri,
                                      out_layer='lyrArable')

    # Polygon area in hectares, used as the denominator of the load formula.
    arcpy.AddField_management(in_table='lyrArable',
                              field_name="Area_ha",
                              field_type="DOUBLE",
                              field_is_nullable="NULLABLE",
                              field_is_required="NON_REQUIRED")
    arcpy.CalculateField_management(in_table='lyrArable',
                                    field="Area_ha",
                                    expression="!shape.area@hectares!",
                                    expression_type="PYTHON_9.3")

    # Crop export factors: look up the single row whose FactorName matches
    # '<nutrient>_factors' (e.g. 'N_factors').
    winter_wheat, spring_wheat, winter_barley, spring_barley, winter_oats, spring_oats, potatoes, \
        sugar_beet, other_crops, other_cereals, pasture, export_factor = \
        None, None, None, None, None, None, None, None, None, None, None, None
    found = False
    for row in arcpy.SearchCursor(in_factors_crop):
        if row.getValue('FactorName') == '{}_factors'.format(nutrient):
            winter_wheat = float(row.getValue('WinterWheat'))
            spring_wheat = float(row.getValue('SpringWheat'))
            winter_barley = float(row.getValue('WinterBarley'))
            spring_barley = float(row.getValue('SpringBarley'))
            winter_oats = float(row.getValue('WinterOats'))
            spring_oats = float(row.getValue('SpringOats'))
            potatoes = float(row.getValue('Potatoes'))
            sugar_beet = float(row.getValue('SugarBeet'))
            other_crops = float(row.getValue('OtherCrops'))
            other_cereals = float(row.getValue('CerealOther'))
            pasture = float(row.getValue('Pasture'))
            export_factor = float(row.getValue('ExportFactor'))
            found = True
            break
    if not found:
        raise Exception('Factors for {} are not available in {}'.format(
            nutrient, in_factors_crop))

    # Arable load per hectare: weighted crop areas times the overall export
    # factor, normalised by polygon area. Only cereals/other crops/potatoes
    # enter this formula; the remaining factors read above are unused here.
    arcpy.AddField_management(in_table='lyrArable',
                              field_name="Arab_calc",
                              field_type="DOUBLE",
                              field_is_nullable="NULLABLE",
                              field_is_required="NON_REQUIRED")
    arcpy.CalculateField_management(
        in_table='lyrArable',
        field="Arab_calc",
        expression=
        "(!total_cere! * {} + !other_crop! * {} + !potatoes! * {}) * {} / "
        "!Area_ha!".format(other_cereals, other_crops, potatoes,
                           export_factor),
        expression_type="PYTHON_9.3")

    if not out_arable:
        out_arable = sep.join(
            [out_gdb, project_name + '_{}_Arable'.format(nutrient)])

    # Clip the per-hectare rates to the location of interest.
    arcpy.Intersect_analysis(in_features=[location, 'lyrArable'],
                             out_feature_class=out_arable,
                             join_attributes="ALL",
                             output_type="INPUT")

    # Recompute the (post-intersection) area and scale the per-hectare rate
    # back up to an absolute load for each clipped polygon.
    arcpy.AddField_management(in_table=out_arable,
                              field_name="Area_ha2",
                              field_type="DOUBLE",
                              field_is_nullable="NULLABLE",
                              field_is_required="NON_REQUIRED")
    arcpy.CalculateField_management(in_table=out_arable,
                                    field="Area_ha2",
                                    expression="!shape.area@hectares!",
                                    expression_type="PYTHON_9.3")
    arcpy.AddField_management(in_table=out_arable,
                              field_name="Arab1calc",
                              field_type="DOUBLE",
                              field_is_nullable="NULLABLE",
                              field_is_required="NON_REQUIRED")
    arcpy.CalculateField_management(in_table=out_arable,
                                    field="Arab1calc",
                                    expression="!Arab_calc! * !Area_ha2!",
                                    expression_type="PYTHON_9.3")

    # calculate load for pasture
    messages.addMessage("> Calculating {} load for Pasture.".format(nutrient))

    arcpy.MakeFeatureLayer_management(in_features=in_agri,
                                      out_layer='lyrPasture')

    arcpy.AddField_management(in_table='lyrPasture',
                              field_name="Area_ha",
                              field_type="DOUBLE",
                              field_is_nullable="NULLABLE",
                              field_is_required="NON_REQUIRED")
    arcpy.CalculateField_management(in_table='lyrPasture',
                                    field="Area_ha",
                                    expression="!shape.area@hectares!",
                                    expression_type="PYTHON_9.3")

    # Livestock export factors for the same nutrient. Note that 'pasture'
    # from the crop-factors table above is reused in the formula below.
    dairy_cows, bulls, other_cattle, cattle_m_1, cattle_m_2, cattle_m_3, cattle_m_4, total_sheep, export_factor, \
        horses = None, None, None, None, None, None, None, None, None, None
    found = False
    for row in arcpy.SearchCursor(in_factors_livestock):
        if row.getValue('FactorName') == '{}_factors'.format(nutrient):
            dairy_cows = float(row.getValue('dairy_cows'))
            bulls = float(row.getValue('bulls'))
            other_cattle = float(row.getValue('other_cattle'))
            cattle_m_1 = float(row.getValue('cattle_m_1'))
            cattle_m_2 = float(row.getValue('cattle_m_2'))
            cattle_m_3 = float(row.getValue('cattle_m_3'))
            cattle_m_4 = float(row.getValue('cattle_m_4'))
            total_sheep = float(row.getValue('total_sheep'))
            horses = float(row.getValue('horses'))
            export_factor = float(row.getValue('ExportFactor'))
            found = True
            break
    if not found:
        raise Exception('Factors for {} are not available in {}'.format(
            nutrient, in_factors_livestock))

    # Pasture load per hectare: livestock headcounts weighted by their
    # factors plus grassland areas (hay/pasture/silage), scaled by the
    # export factor and normalised by polygon area.
    arcpy.AddField_management(in_table='lyrPasture',
                              field_name="Past_calc",
                              field_type="DOUBLE",
                              field_is_nullable="NULLABLE",
                              field_is_required="NON_REQUIRED")
    arcpy.CalculateField_management(
        in_table='lyrPasture',
        field="Past_calc",
        expression="{} * (!bulls! * {} + !dairy_cows! * {} + "
        "!suckler_co! * {} + (!cattle_m_1! + !cattle_f_1!) * {} + "
        "(!cattle_m_2! + !cattle_f_2!) * {} + "
        "(!cattle_m_3! + !cattle_f_3! + !cattle_m_4! + "
        "!cattle_f_4! + !dairyheife! + !otherheife!) * {} + "
        "!total_shee! * {} + !horses! * {} + "
        "(!Hay! + !Pasture! + !Silage!)* {}) / !Area_ha!".format(
            export_factor, bulls, dairy_cows, other_cattle, cattle_m_1,
            cattle_m_2, cattle_m_3, total_sheep, horses, pasture),
        expression_type="PYTHON_9.3")

    if not out_pasture:
        out_pasture = sep.join(
            [out_gdb, project_name + '_{}_Pasture'.format(nutrient)])

    arcpy.Intersect_analysis(in_features=[location, 'lyrPasture'],
                             out_feature_class=out_pasture,
                             join_attributes="ALL",
                             output_type="INPUT")

    # As for arable: absolute load = per-hectare rate * clipped area.
    arcpy.AddField_management(in_table=out_pasture,
                              field_name="Area_ha2",
                              field_type="DOUBLE",
                              field_is_nullable="NULLABLE",
                              field_is_required="NON_REQUIRED")
    arcpy.CalculateField_management(in_table=out_pasture,
                                    field="Area_ha2",
                                    expression="!shape.area@hectares!",
                                    expression_type="PYTHON_9.3")

    arcpy.AddField_management(in_table=out_pasture,
                              field_name="Past1calc",
                              field_type="DOUBLE",
                              field_is_nullable="NULLABLE",
                              field_is_required="NON_REQUIRED")
    arcpy.CalculateField_management(in_table=out_pasture,
                                    field="Past1calc",
                                    expression="!Past_calc! * !Area_ha2!",
                                    expression_type="PYTHON_9.3")

    return out_arable, out_pasture
Ejemplo n.º 17
0
arcpy.AddMessage("Define arguments complete")

# Batch-window parameters from the command line: process rows
# [startIteration, startIteration + numIterations) of the input fc.
maxCWD = int(sys.argv[4])
numIterations = int(sys.argv[5])
startIteration = int(sys.argv[6])
rOut = sys.argv[7]

ws = env.workspace

arcpy.AddMessage("Convert arguments complete")

# get list of points
# Collect the FID and Blob value of each row inside the batch window.
lstFIDs = []
lstBlobs = []
rows = arcpy.SearchCursor(inFC)
cnt = 0
for row in rows:
    # if cnt/10 == int(cnt/10):
    # arcpy.AddMessage("Succesfully entered the loop for count #"+str(cnt))
    if cnt >= (startIteration + numIterations):
        # Past the end of this batch window; stop reading.
        break
    if cnt >= startIteration:
        lstFIDs.append(row.FID)
        blob = row.getValue("Blob")
        lstBlobs.append(blob)
    cnt += 1

arcpy.AddMessage("FIDs: " + str(lstFIDs))

# Accumulator for per-FID centrality output files (filled later).
lstCentralityFiles = []
Ejemplo n.º 18
0
#Check if path exists
if os.path.exists(pdfPath):
    os.remove(pdfPath)

# Create the atlas pdf using the pdfPath
arcpy.AddMessage("Creating PDF at " + pdfPath)
pdfDoc = arcpy.mapping.PDFDocumentCreate(pdfPath)

# Now using Def SlopeRasterMask(1,2,3,4)
SlopeRasterMask(DEMToUse, RoadsToUse, RoadBuffer, S_R_Raster)
#myCurrentLayer = arcpy.mapping.Layer(S_R_Raster)
#arcpy.mapping.AddLayer(df, myCurrentLayer, "BOTTOM")

# finding name list for counties
myList = [row.getValue('NAME') for row in arcpy.SearchCursor(Project_Counties)]
pagenum = number_of_startPage

for county in myList:
    findName = county.replace('u', '')
    arcpy.AddMessage("Working on " + findName + "County")
    # Analyze the County, Kernal, etc.
    county_count = 1

    for myDensity in KernalDense.split(';'):
        arcpy.AddMessage("Kernal Denisty # being used is: " + str(myDensity))
        arcpy.AddMessage("Kernal Denisty type being used is: " +
                         str(type(myDensity)))
        countyToRaster(Project_Counties, findName, cellular, oneCountyBuffer,
                       CellTowerLocations, float(myDensity))
        #myCurrentLayer = arcpy.mapping.Layer(CellTowerLocations)
Ejemplo n.º 19
0
def mainFunction(SDE_FC_test, target_Display, u_bool, i_bool, u_List, i_List,
                 sql, GID_bool):

    SDE_Value = "GlobalID"
    if GID_bool == True:
        SDE_Value = "SDEID"
    else:
        pass

    desc1 = arcpy.Describe(SDE_FC_test)

    fieldlist1 = []
    for field1 in desc1.fields:
        fieldlist1.append(field1.name)

    desc2 = arcpy.Describe(target_Display)

    fieldlist2 = []
    for field2 in desc2.fields:
        fieldlist2.append(field2.name)

    s_GID = arcpy.SearchCursor(SDE_FC_test, sql, "", "", "")
    print "Search cursor set... for Insert row"
    if i_bool == True:
        print "Insert True..."

        #Create Insert Cursor (It is more efficient if the Insert Cursor is created outside the loop)
        i_cursSDE = arcpy.InsertCursor(target_Display)

        for s_row in s_GID:
            x = s_row.getValue(SDE_Value)
            print x
            z1 = s_row.getValue("MeasureType")
            z2 = s_row.getValue("MeasureLength")
            if "MeasureWidth" in fieldlist1:
                z2a = s_row.getValue("MeasureWidth")
            z3 = s_row.getValue("SurveyBy")
            z4 = s_row.getValue("SurveyByDate")
            z5 = s_row.getValue("BlockNo")
            z6 = s_row.getValue("PageNo")
            z7 = s_row.getValue("OmitYN")
            z8 = s_row.getValue("SHAPE")
            z9 = s_row.getValue(SDE_Value)
            z10 = s_row.getValue("strCCEProjectNumber")
            z11 = s_row.getValue("lngProjectID")
            z12 = s_row.getValue("SurveyNote")
            z13 = userInit
            today = datetime.datetime.now()
            z14 = today.strftime("%y/%m/%d")

            if x in i_List:
                row = i_cursSDE.newRow()
                row.MeasureType = z1
                row.MeasureLengthFt = z2
                if "MeasureWidthFt" in fieldlist2:
                    row.MeasureWidthFt = z2a
                row.SURVEYBY = z3
                row.BlockNo = z5
                row.SurveyByDate = z4
                row.PageNo = z6
                row.OmitYN = z7
                row.SHAPE = z8
                row.SDEID = z9
                row.strCCEProjectNumber = z10
                row.lngProjectID = z11
                row.SurveyNote = z12
                row.CalcBy = z13
                row.CalcByDate = z14

                i_cursSDE.insertRow(row)
                print "Row Inserted..."
                del row
        del i_cursSDE, x
    del s_GID, i_List

    arcpy.AddMessage(
        "Display Geometries have been added to the Final Display Feature Class"
    )

    s_GID = arcpy.SearchCursor(SDE_FC_test, sql, "", "", "")
    print "Search cursor set... for Insert row"

    if u_bool == True:
        print "Update True..."
        # Create Search cursor that will use update cursor
        # Setup update cursor for the Target SDE - This will cycle through the existing records in the SDE.

        for s_row in s_GID:
            # Get Values
            x = s_row.getValue(SDE_Value)
            print x
            if x in u_List:
                z1 = s_row.getValue("MeasureType")
                z2 = s_row.getValue("MeasureLength")
                if "MeasureWidth" in fieldlist1:
                    z2a = s_row.getValue("MeasureWidth")
                z3 = s_row.getValue("SurveyBy")
                z4 = s_row.getValue("SurveyByDate")
                z5 = s_row.getValue("BlockNo")
                z6 = s_row.getValue("PageNo")
                z7 = s_row.getValue("OmitYN")
                z8 = s_row.getValue("SHAPE")
                z9 = s_row.getValue(SDE_Value)
                z10 = s_row.getValue("strCCEProjectNumber")
                z11 = s_row.getValue("lngProjectID")
                z12 = s_row.getValue("SurveyNote")
                z13 = userInit
                today = datetime.datetime.now()
                z14 = today.strftime("%y/%m/%d")

                # Compare value from Feature Class to value list u_list
                u_cursSDE = arcpy.UpdateCursor(target_Display,
                                               "\"SDEID\"" + " = '" + x + "'",
                                               "", "", "")
                for row in u_cursSDE:
                    if row.getValue("SDEID") == x:

                        # Set Values
                        row.setValue("MeasureType", z1)
                        row.setValue("MeasureLengthFt", z2)
                        if "MeasureWidthFt" in fieldlist2:
                            row.setValue("MeasureWidthFt", z2a)
                        row.setValue("SURVEYBY", z3)
                        row.setValue("BlockNo", z5)
                        row.setValue("SurveyByDate", z4)
                        row.setValue("PageNo", z6)
                        row.setValue("OmitYN", z7)
                        row.setValue("SDEID", z9)
                        row.setValue("strCCEProjectNumber", z10)
                        row.setValue("lngProjectID", z11)
                        row.setValue("SurveyNote", z12)
                        row.setValue("CalcBy", z13)
                        row.setValue("CalcByDate", z14)
                        # Update row
                        u_cursSDE.updateRow(row)
                        print x + " was updated in SDE..."
                        del row
                del u_cursSDE
        del s_GID, x, u_List

    arcpy.AddMessage(
        "Display Geometries have been updated to the Final Display Feature Class"
    )
incident_layer_name = sub_layer_names["Destinations"]

input_feature = "D:\QimingShi\disease_visualization\Staph\closest_facility.gdb\\result29"

##field mapping
fm_type = arcpy.FieldMap()
fm_diam = arcpy.FieldMap()
fms = arcpy.FieldMappings()
fm_type.addInputField(inDestinations, "Name")
fm_diam.addInputField(inDestinations, "Total_Length")
fms.addFieldMap(fm_type)
fms.addFieldMap(fm_diam)

i = 0

for row in arcpy.SearchCursor(inOrgins):
    i = i + 1
    rows = arcpy.InsertCursor(
        "D:\QimingShi\disease_visualization\Staph\closest_facility.gdb\\result29"
    )
    rows.insertRow(row)
    if (i % 50 == 0 and i <= 1350) or i == 1387:
        print i
        print rows
        print str(datetime.datetime.now())

        arcpy.na.AddLocations(analysis_layer,
                              facility_layer_name,
                              input_feature,
                              "Name PAT_MRN_ID #",
                              "#",
        day = time.strftime("%m%d%Y")
        name = os.path.basename(layer)

        dataFields = [i.name for i in arcpy.ListFields(layer)]

        file = os.path.join(outPath, ("{0}{1}{2}".format(day, name, ".txt")))
        print file
        f = open(file, "w")

        writeFields = []

        for field in inFields:
            if field in dataFields:
                writeFields.append(field)

        for row in arcpy.SearchCursor(layer):
            # No longer need to reference name property (field.name)

            fieldVals = [row.getValue(field) for field in writeFields]
            # Replace nulls with empty strings
            fieldVals = ['' if i is None else i for i in fieldVals]
            fieldVals = [str(field) for field in fieldVals]
            out_string = ','.join(fieldVals)
            # Write the string--not the list--to the table
            f.writelines(out_string + "\n")
        del row
        f.close()

    arcpy.Delete_management(layer)
    arcpy.Delete_management("OldSeq")
Ejemplo n.º 22
0
# Insert buffer layer object into mxd data frame
# (inserts outBuffer2Layer positioned AFTER outBuffer1Layer in df)
arcpy.mapping.InsertLayer(df, outBuffer1Layer, outBuffer2Layer, "AFTER")

# ----------------
# Set mxd data frame spatial reference
# ----------------
# Create UTM layer object
utm10kmGridLayer = arcpy.mapping.Layer(inUTM10kmGridLayer)

# Select polygon in UTM layer that contains site point
arcpy.SelectLayerByLocation_management(utm10kmGridLayer, "CONTAINS",
                                       outPointLayer)

# Create UTM layer search cursor
cur = arcpy.SearchCursor(utm10kmGridLayer)

# Advance to first record
# Only one grid polygon is expected to contain the site point, so just the
# first selected record is read.
row = cur.next()

# Set variable to UTM_ZONE field value in selected record
utmZone = row.getValue("UTM_ZONE")

# Delete cursor and row objects to release locks on UTM Layer
del cur, row

# Set mxd data frame spatial reference to selected NAD83 UTM Zone
# Loads the .prj file named after the zone from the spatial-reference folder.
df.spatialReference = arcpy.SpatialReference(
    os.path.join(inFolderSpatialRef,
                 "NAD 1983 UTM Zone " + str(utmZone) + "N.prj"))
Ejemplo n.º 23
0
    slp = Slope(DEM_after, "DEGREE", "1")
    # Process: Slope
    asp = Aspect(DEM_after)
    # Process: Aspect

    # Extract raster values using point shapefile
    dem_fieldName = "dem" + str(iteration - 1)
    slp_fieldName = "slp" + str(iteration - 1)
    asp_fieldName = "asp" + str(iteration - 1)
    inRasterList = [[DEM_after, dem_fieldName], [slp, slp_fieldName],
                    [asp, asp_fieldName]]
    ExtractMultiValuesToPoints(point, inRasterList, "BILINEAR")

    # Read attribute table
    cursor = arcpy.SearchCursor(point)
    fields = [point_fieldName, dem_fieldName, slp_fieldName, asp_fieldName]
    point_table = [0]
    dem_table = [0]
    dh_table = [0]
    slp_table = [0]
    asp_table = [0]
    for row in cursor:
        point_table.append(row.getValue(fields[0]))
        dem_table.append(row.getValue(fields[1]))
        dh_table.append(row.getValue(fields[0]) - row.getValue(fields[1]))
        slp_table.append(row.getValue(fields[2]))
        asp_table.append(row.getValue(fields[3]))

    point_table = np.array(point_table[1:])
    dem_table = np.array(dem_table[1:])
Ejemplo n.º 24
0
    def identifySuitable(self):
        """Identify suitable renewable-energy sites for one technology.

        Workflow (all geoprocessing via arcpy / Spatial Analyst):
          1. read the CSV of input datasets (``self.csvInput``);
          2. multiply the per-dataset conditional rasters (``Con``) into a
             single constraint raster;
          3. for each resource threshold in ``self.thresholdList``: threshold
             the resource raster, mask to the country bounds, polygonize,
             select the suitable polygons, optionally intersect with
             geographic units, and compute area (km2) and generation (MWh);
          4. write the accumulated area/generation table to
             ``self.csvAreaOutput`` + ".csv".

        Feature-class results are saved in ``self.out_suitableSites_gdb``.
        """

        ### Preamble:

        # start_time = time.time()
        # print start_time
        # Check out the ArcGIS Spatial Analyst extension license
        arcpy.CheckOutExtension("Spatial")

        arcpy.env.overwriteOutput = True
        '''
        ############################################################################################################
        ## --------------------------------------- GET ALL INPUTS ----------------------------------------------- ##
        ############################################################################################################
        '''
        #####################
        ## USER SET INPUTS ##
        #####################

        #yourSpace = "R:\\users\\anagha.uppal\\MapRE\\MapRE_Data\\" ##^^ This is the directory path before the IRENA folder structure
        #defaultInputWorkspace = yourSpace + "INPUTS\\" ##^^ enter the path to your DEFAULT INPUT path

        ##########################
        ## SET FIXED PARAMETERS OR INPUTS ##
        ##########################

        arcpy.env.workspace = self.out_suitableSites_gdb

        ## FIXED PARAMETERS
        # NOTE(review): `days` and `hours` are never referenced below; the
        # generation formula hard-codes 8760 instead of using `hours`.
        days = 365
        hours = 8760

        ### Other conditional clauses. Change as needed:
        # Output values used by the Con() raster calculations: 1 = suitable,
        # 0 = unsuitable.
        ifTrue = 1
        ifFalse = 0

        ## BUFFER
        sideType = "FULL"
        endType = "ROUND"
        dissolveType = "ALL"

        # Scratch path for the intersect output; rebound to the un-intersected
        # selection further below when no geographic units are provided.
        selectIntermediate_geoUnits = "in_memory/selectIntermediate_geoUnits"

        ###############
        ## FUNCTIONS ##
        ###############
        def getFields(data):
            # Return the list of field names of a dataset.
            fieldList = []
            fields = arcpy.ListFields(data)
            for field in fields:
                fieldList.append(field.name)
            return fieldList

        '''
        #####################################################################################
        #### --------------------------------GEOPROCESSES--------------------------------####
        #####################################################################################
        '''
        '''
        ############################################
        ## Set environments and scratch workspace ##
        ############################################
        '''

        # set environments for raster analyses
        arcpy.env.extent = self.countryBounds
        arcpy.env.mask = self.countryBounds
        arcpy.env.snapRaster = self.templateRaster
        arcpy.env.cellSize = self.templateRaster

        ## INPUTS
        scriptpath = sys.path[0]
        toolpath = os.path.dirname(scriptpath)
        # tooldatapath = os.path.join(toolpath, "FOLDERNAME")
        # datapath = os.path.join(tooldatapath, "FILENAME.")

        ## SET SCRATCH WORKSPACES (AND CREATE SCRATCH.GDB IF IT DOESN'T EXIST)
        # scratchws = env.scratchWorkspace
        # scriptpath = sys.path[0]
        # toolpath = os.path.dirname(scriptpath)
        # if not env.scratchWorkspace:
        #    if not(os.path.exists(os.path.join(toolpath, "Scratch/scratch.gdb"))): # Create new fgdb if one does not already exist
        #        arcpy.AddMessage("Creating fgdb " + os.path.join(toolpath, "Scratch/scratch.gdb"))
        #        arcpy.CreateFileGDB_management(toolpath + "/Scratch", "scratch.gdb")
        #    scratchws = os.path.join(toolpath, "Scratch/scratch.gdb")
        #    arcpy.AddMessage("Set scratch workspace")
        env.scratchWorkspace = self.scratch
        '''
        ##############
        ## Read CSV ##
        ##############
        '''
        # Each CSV column is one data category; rows are per-category values
        # (whether to use it, dataset path, where-clause) — see usage below.
        with open(self.csvInput, "rt") as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            fields = next(reader)
            inputData = []
            for row in reader:
                inputData.append(dict(zip(fields, row)))

        ## inputDataPath is a dictionary of all the input datasets
        inputDataPath = {}

        ## populate the inputDataPath for each of the data categories.
        # Maps category -> [row0, row1, row2] values from the CSV
        # (used below as [use-flag, dataset, condition string]).
        for dataCategory in fields:
            inputDataPath.update({dataCategory: [inputData[0][dataCategory], \
                                                 inputData[1][dataCategory], inputData[2][dataCategory]]})

        #    print dataCategory
        #    if not(inputData[0][dataCategory] == "no"):
        #        if (inputData[1][dataCategory] == "default"):
        #            inputDataPath[dataCategory] = defaultInputWorkspace + inputData[2][dataCategory] ##^^ enter local path for rail file.
        #        elif (inputData[1][dataCategory] == "country"):
        #            inputDataPath[dataCategory] = countryWorkspace + inputData[2][dataCategory] ##^^ enter local path for rail file.
        #        else: print dataCategory + "no data"
        #    print inputDataPath[dataCategory]

        ## Calculate the non-technology-specific conditional rasters for the data categories that may or may not have any datasets. If the data for that category does not exist, then the conditional raster variable is assigned a scalar value of 1
        '''
        ########################
        ## Raster Calculation ##
        ########################
        '''
        ## initiate rasterSelection_constraints
        # Scalar 1 is the multiplicative identity: categories marked "yes"
        # multiply their 0/1 Con() raster into this accumulator.
        rasterSelection_constraints = 1

        ## CALCULATE CONSTRAINT-ONLY RASTER
        for constraint in inputDataPath:
            if inputDataPath[constraint][0] == "yes":
                rasterSelection = Con(inputDataPath[constraint][1], ifTrue, ifFalse, \
                                      str(inputDataPath[constraint][2]))
                rasterSelection_constraints = rasterSelection * rasterSelection_constraints
                arcpy.AddMessage("Finished raster calculation for " +
                                 constraint)

        ## LISTS TO HOLD THE AREAS AND WRITE TO CSV
        areaSumList = ['Area_km2']
        generationSumList = ['Generation_MWh']
        areaLabelList = ['Scenarios']
        subunitsList = ['Subregions']

        ## CREATE THRESHOLD SCENARIOS
        # NOTE(review): if self.thresholdList is empty, `areaTable` is never
        # assigned and the CSV write at the end raises NameError.
        for threshold in self.thresholdList:
            resourceArea = Con(self.resourceInput, ifTrue, ifFalse,
                               "Value >= " + str(threshold))
            rasterSelection_final = rasterSelection_constraints * resourceArea
            arcpy.AddMessage(
                "Finished raster calculation for resource threshold: " +
                str(threshold))

            if self.countryBounds == "":
                outExtractByMask = rasterSelection_final
            else:
                outExtractByMask = ExtractByMask(rasterSelection_final,
                                                 self.countryBounds)

            # File-name-safe threshold string (e.g. "0.5" -> "0_5").
            thresholdStr = str(threshold)
            thresholdStr = thresholdStr.replace(".", "_")

            thresholdFileName = self.technology + "_" + thresholdStr
            outputFileName = os.path.join(self.out_suitableSites_gdb, \
                                          str(thresholdFileName) + "_" + self.fileNameSuffix)

            ## Raster to polygon conversion
            intermediate = arcpy.RasterToPolygon_conversion(
                outExtractByMask, "in_memory/intermediate", "NO_SIMPLIFY",
                "Value")
            ## Process: select gridcode = 1
            intermediateFields = getFields(intermediate)
            ## check the name of the "grid code" field in the polygon output.
            # NOTE(review): if neither "grid_code" nor "gridcode" is present,
            # `selectIntermediate` stays unbound and the code below raises
            # NameError — confirm the field always exists for this ArcGIS
            # version.
            if "grid_code" in intermediateFields:
                selectIntermediate = arcpy.Select_analysis(
                    intermediate, "in_memory/selectIntermediate",
                    '"grid_code" = 1')

            if "gridcode" in intermediateFields:
                selectIntermediate = arcpy.Select_analysis(
                    intermediate, "in_memory/selectIntermediate",
                    '"gridcode" = 1')

            ## INTERSECT Geographic Unit of Analysis, if provided
            if arcpy.Exists(self.geoUnits):
                arcpy.AddMessage(
                    "Intersecting by geographic units of analysis")
                arcpy.Intersect_analysis([selectIntermediate, self.geoUnits],
                                         selectIntermediate_geoUnits, "NO_FID")
            else:
                selectIntermediate_geoUnits = selectIntermediate

            # Process: Add Field
            arcpy.AddField_management(selectIntermediate_geoUnits, "Area",
                                      "DOUBLE", "", "", "", "", "NULLABLE",
                                      "NON_REQUIRED", "")

            # Process: Calculate Field (polygon area in square kilometers)
            arcpy.CalculateField_management(selectIntermediate_geoUnits,
                                            "Area",
                                            "!Shape.Area@squarekilometers!",
                                            "PYTHON_9.3", "")

            # Process: select areas above minimum contiguous area and SAVE to file
            select = arcpy.Select_analysis(selectIntermediate_geoUnits, outputFileName, \
                                           '"Area" >= ' + str(self.minArea))

            if self.save_subunits_workspace != "":  ## save subunits
                arcpy.Split_analysis(select, self.geoUnits,
                                     self.geoUnits_attribute,
                                     self.save_subunits_workspace)

            if self.rasterOutput.lower() == 'true':  ##save the raster output
                out_resourceRaster = ExtractByMask(self.resourceInput, select)
                out_resourceRaster.save(outputFileName + "_resourceRaster")

            # get total area of potential:
            arcpy.AddMessage("Finished resource estimate for threshold: " +
                             str(threshold) + ", start calculating area")
            cursor = arcpy.SearchCursor(select)

            if self.geoUnits_attribute == "":
                # No per-unit attribute: sum area/generation over all rows.
                areaList = []
                generationList = []
                for row in cursor:
                    area = row.getValue("Area")

                    # MWh = km2 * (MW/km2 efficiency) * capacity factor
                    #       * 8760 h/yr / 1000 * land-use discount
                    generation = area * self.landUseEfficiency * self.avgCF * 8760 / 1000 * self.landUseDiscount

                    generationList.append(generation)
                    areaList.append(area)
                areaSumList.append(sum(areaList))
                generationSumList.append(sum(generationList))
                areaLabelList.append(
                    str(thresholdFileName) + "_" + self.fileNameSuffix)
                areaTable = [areaLabelList, areaSumList, generationSumList]
            else:
                # Per-unit attribute given: accumulate area per attribute
                # value, then derive generation per unit.
                areaList = []
                generationList = []
                geoUnits_attributeList = []
                areaNameDict = {}
                for row in cursor:
                    attribute = row.getValue(self.geoUnits_attribute)
                    area = row.getValue("Area")
                    if (attribute not in areaNameDict):
                        areaNameDict[attribute] = area
                    elif (attribute in areaNameDict):
                        areaNameDict[
                            attribute] = areaNameDict[attribute] + area

                geoUnits_attributeList = list(areaNameDict.keys())
                areaList = list(areaNameDict.values())
                for key in areaNameDict:
                    generation = areaNameDict[
                        key] * self.landUseEfficiency * self.avgCF * 8760 / 1000 * self.landUseDiscount
                    generationList.append(generation)

                #areaList.append(area)
                #geoUnits_attributeList.append(attribute)
                # geoattrUnique = list(set(geoUnits_attributeList))
                #
                # # initialise data of lists.
                # data = {'Geo Unit': geoUnits_attributeList,
                #         'Area': areaList,
                #         'Generation': generationList,
                #         }
                # # Create DataFrame
                # df = pd.DataFrame(data)
                # for value in geoattrUnique:
                #     areaSumList.append(df.loc[df['Geo Unit'] == value, 'Area'].sum())
                #     generationSumList.append(df.loc[df['Geo Unit'] == value, 'Generation'].sum())
                areaSumList = areaSumList + areaList
                generationSumList = generationSumList + generationList
                subunitsList = subunitsList + geoUnits_attributeList
                areaLabelList.append(
                    str(thresholdFileName) + "_" + self.fileNameSuffix)
                # areaTable = [areaLabelList, subunitsList, areaSumList, generationSumList]
                areaTable = [
                    areaLabelList, subunitsList, areaSumList, generationSumList
                ]

                #areaLabelList.append(str(thresholdFileName) + "_" + self.fileNameSuffix)
                #areaTable = [areaLabelList, geoattrUnique, areaSumList, generationSumList]
        '''
        #######################################
        ## Write area csv for all thresholds ##
        #######################################
        '''

        # NOTE(review): dead code — this branch only passes; the Exists()
        # check has no effect here.
        if arcpy.Exists(self.geoUnits):
            pass

        # Write Area Sums table as CSV file
        with open(self.csvAreaOutput + ".csv", 'w') as csvfile:
            writer = csv.writer(csvfile)
            [writer.writerow(r) for r in areaTable]
Ejemplo n.º 25
0
# Inspect and reset the spatial reference of the current ArcMap document's
# first data frame, then iterate over the OBJECTIDs of its first layer.
# NOTE(review): Python 2 script (print statements). `v` is never used, and
# `a` (4544) is overwritten by the second cursor loop below.
import arcpy
a = 4544
v = "CGCS2000_3_Degree_GK_CM_108E"
mxd1 = arcpy.mapping.MapDocument("CURRENT")
df = arcpy.mapping.ListDataFrames(mxd1)[0]  # first data frame of the map document
layer = arcpy.mapping.ListLayers(mxd1)[0]  # first layer in the map document
# b= arcpy.Describe(ur"E:\move on move on\公示图\公示图.mxd")
# c= b.spatialReference
# print  c
print df.spatialReference

# WKID 4545; judging from the unused `v`, presumably a CGCS2000 3-degree
# Gauss-Kruger system — TODO confirm intended WKID (4544 vs 4545).
df.spatialReference = arcpy.SpatialReference(4545)
cursor = arcpy.da.SearchCursor(layer, "OBJECTID")
for row in cursor:
    # arc_id = row[0]
    # print type(arc_id)
    # print arc_id
    # print type(row)
    # print row
    # print row
    # for field_name in row:
    # 	print field_name
    pass

cursor = arcpy.SearchCursor(layer, "")
for row in cursor:
    a = [row.getValue('OBJECTID')]  # single-element list with this row's OBJECTID
    for b in a:
        print b

print a
Ejemplo n.º 26
0
    # Execute DeleteField
    arcpy.DeleteField_management(inTable, dropFields)
    arcpy.AddMessage("non-essential Fields were removed from spatial join")

    ##########part 2
    #add new field cbgtotal and add values to it
    fn1 = "CBGTOTAL"
    f1precision = 9
    arcpy.AddField_management(inTable, fn1, "LONG", 9)
    arcpy.AddMessage("CBGTOTAL field added")

    #for all select geography with the same value, CGBTOTAL = sum of join_count
    #or create an array for all possible values; create a list of all unique field values

    srows = arcpy.SearchCursor(inTable)
    srow = srows.next()
    mylist = []

    while srow:  #while a row exists in a cursor
        #here
        geog = srow.GEOGRAPHY
        #add every value in the geography field to the mylist array
        mylist.append(geog)

        #need to store just unique geog values in an array
        srow = srows.next()
        geog_set = set(mylist)

    arcpy.AddMessage("Done search cursor for Geography column")
    #print all unique values in geography column just once
Ejemplo n.º 27
0
# -*- coding: cp936 -*-
# Clip a navigation-road shapefile by each polygon of a mosaic-boundary
# shapefile (selected via its TaskID attribute) and save one clipped
# shapefile per boundary polygon.
# NOTE(review): Python 2 script; original comments were in Chinese and have
# been translated. Path string literals are left untouched.
import arcpy
import os
input_shp = r"F:\PLA_Analysis2016\TestData\RoadLineLevel0\导航道路数据\NavigationRoad.shp"
clip_fea = r"F:\PLA_Analysis2016\TestData\RoadLineLevel0\影像镶嵌框\360826_Taihexian_MosaicBoundary.shp"
# path of the folder where the results are saved
save_floder = r"F:\PLA_Analysis2016\TestData\RoadLineLevel0\www"
arcpy.MakeFeatureLayer_management(clip_fea, "lyr") 
icursor = arcpy.SearchCursor(clip_fea)
for row in icursor:
    igetvalue = row.getValue("TaskID")
#     igetvalue2 = row.getValue("name")
    print igetvalue 
    sql = '"TaskID" =' + "'" + igetvalue + "'"   #'"TaskID" = %s'%igetvalue
    arcpy.SelectLayerByAttribute_management("lyr","NEW_SELECTION",sql)
    # build the file name for the clipped result
    #save_name = os.path.basename(os.path.splitext(input_shp)[0]) + "_" + '%s'%igetvalue2
    save_name = "360826_Taihexian_NavigationRoad" + "_part" +  str(igetvalue) + "_8.shp" #'%s'%igetvalue2 + ".shp"
    print save_name
    #outsave = os.path.join(save_floder,save_name)
    outsave = save_floder + "\\" + save_name
    arcpy.Clip_analysis(input_shp,"lyr",outsave)
    arcpy.SelectLayerByAttribute_management("lyr","CLEAR_SELECTION")
print "over"
Ejemplo n.º 28
0
#
# Author:      panda (stack exchange)
#
# Created:     04/01/2019
# Copyright:
# Licence:     <your licence>
#-------------------------------------------------------------------------------
# Count the occurrences of each value of `field_in` and write the count into a
# new SHORT field ("COUNT_" + field_in) on the same feature class.
#
# BUG FIX: the original was pasted from an interactive console and kept the
# "... " continuation-prompt prefixes, which are syntax errors in a script
# file; they have been removed. Logic is otherwise unchanged.
import arcpy

mxd = arcpy.mapping.MapDocument("CURRENT")
infeature = "Pipes_7"
field_in = "created_date"
field_out = "COUNT_" + field_in
# create the field for the count values
arcpy.AddField_management(infeature, field_out, "SHORT")
# creating the list with all the values in the field, including duplicates
lista = []
cursor1 = arcpy.SearchCursor(infeature)
for row in cursor1:
    i = row.getValue(field_in)
    lista.append(i)
del cursor1, row
# updating the count field with the number of occurrences of field_in values
# in the previously created list
# NOTE(review): lista.count(i) inside the loop is O(n^2); fine for small
# feature classes, use collections.Counter for large ones.
cursor2 = arcpy.UpdateCursor(infeature)
for row in cursor2:
    i = row.getValue(field_in)
    occ = lista.count(i)
    row.setValue(field_out, occ)
    cursor2.updateRow(row)
del cursor2, row
print ("Done.")
Ejemplo n.º 29
0
# Randomly select a user-specified percentage of features from a feature
# class (by OID), without replacement.
# NOTE(review): relies on `arcpy` and `numpy as np` being imported earlier in
# the file; they are not imported in this snippet.
from arcpy import env

# BUG FIX: was `env.overwriteoutput` (wrong case) — arcpy environment
# attributes are case-sensitive, so the original assignment had no effect.
env.overwriteOutput = True
# Tool parameters: input feature class, output feature class, and the
# percentage of features to select (as an integer).
inputfc = arcpy.GetParameterAsText(0)
outputfc = arcpy.GetParameterAsText(1)
outpercent = int(arcpy.GetParameterAsText(2))

# Describe object gives access to the OID field name of the input.
desc = arcpy.Describe(inputfc)
fldname = desc.OIDFieldName

# Collect every OID of the input feature class.
inlist = []
rows = arcpy.SearchCursor(inputfc)
row = rows.next()
while row:  # iterate until the cursor is exhausted
    inlist.append(row.getValue(fldname))
    row = rows.next()

# BUG FIX: the original drew one OID per np.random.choice(..., replace=False)
# call inside a while loop; replace=False only applies within a single call,
# so repeated calls could return the same OID more than once and the result
# could contain duplicates (contradicting the stated intent). Drawing the
# whole sample in one call guarantees distinct OIDs.
samplesize = int((outpercent / float(100)) * len(inlist))
randomlist = list(np.random.choice(inlist, size=samplesize, replace=False))
    
#Starts Geoprocessing
# Build name lists ("<place>_fishnet", "<place>_roads") from the FLAG_NAME
# attribute of a priority-places feature class.
arcpy.env.overwriteOutput = True

myWorkspace=r"C:\Data\WWF\Processing"
arcpy.env.workspace = myWorkspace
gdbList = arcpy.ListWorkspaces("*", "FileGDB")  # all file GDBs in the workspace


#Set Input Output variables
#inputFile = fc #<-- CHANGE
#outDir = u"C:\Data\WWF\Test_processes\\" #<-- CHANGE
#Get list of priority places
fc = r"C:\Data\WWF\Study_scope\WWF_PP_Terr.gdb\WWF_PP_Terr"
field = "FLAG_NAME"
cursor = arcpy.SearchCursor(fc)
PP_list_fishnet=[]
PP_list_temp=[]
PP_list_roads=[]
for row in cursor:
    t=row.getValue(field)
    PP_list_temp.append(t)
    # NOTE(review): this inner loop only leaves t = str() of the LAST element
    # of PP_list_temp; per-row processing was probably intended — confirm.
    for i in PP_list_temp:
        t=str(i)
    g = t.replace(" ", "_")  # make the name filesystem-friendly
    # NOTE(review): appends the underscored name back into the same list that
    # the loops above and below scan, so the list holds both raw and
    # underscored names — confirm this is intentional.
    PP_list_temp.append(g)
    # NOTE(review): likewise leaves g/h derived from the LAST element only.
    for i in PP_list_temp:
        g=str(i)+ "_fishnet"
        h=str(i)+ "_roads"
    PP_list_roads.append(h)
    PP_list_fishnet.append(g)