Example #1
    def Select_By_Location(self,
                           Connection,
                           ref_layer,
                           distance=0,
                           New_Layer=None,
                           invert=''):
        '''
        Connection:
        1) ARE_IDENTICAL_TO
        2) BOUNDARY_TOUCHES
        3) INTERSECT
        4) HAVE_THEIR_CENTER_IN
        5) WITHIN
        6) WITHIN_A_DISTANCE
        7) COMPLETELY_WITHIN
        8) SHARE_A_LINE_SEGMENT_WITH
        distance:
        e.g. "5 Meters"
        '''
        if invert != '':
            invert = "INVERT"

        FeatureLyr = arcpy.MakeFeatureLayer_management(self.layer,
                                                       self.name + '_Layer')
        arcpy.SelectLayerByLocation_management(FeatureLyr, Connection,
                                               ref_layer, distance, '', invert)
        if not New_Layer:
            arcpy.DeleteFeatures_management(FeatureLyr)
        else:
            arcpy.Select_analysis(FeatureLyr, New_Layer)
            return New_Layer
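A minimal usage sketch for this method, assuming it is defined on a wrapper class that exposes `self.layer` (a feature class path) and `self.name`; the wrapper class and dataset names below are hypothetical:

# hypothetical wrapper and data; only Select_By_Location comes from the example
parcels = LayerWrapper('parcels.shp')  # assumed class exposing .layer and .name
parcels.Select_By_Location('WITHIN_A_DISTANCE', 'roads.shp',
                           distance='5 Meters',
                           New_Layer='parcels_near_roads')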
Example #2
def compute_CII_per_island():
    # Compute the CII score per island as zonal statistics
    arcpy.CheckOutExtension("Spatial")
    arcpy.sa.ZonalStatisticsAsTable("buffered_islands", "STRONG", "cii_overall_score_ras1",
                                    "islands_with_CII_scores_table", "DATA", "MEAN")
    # Rename field MEAN to CII_Score_Overall
    arcpy.AlterField_management("islands_with_CII_scores_table", "MEAN", "CII_Score_Overall")
    # Join the resulting table back to the original islands feature class
    arcpy.AddJoin_management("islands", "STRONG", "islands_with_CII_scores_table", "STRONG", "KEEP_ALL")
    # Save to a new feature class
    arcpy.CopyFeatures_management("islands", "islands_with_score_with_nulls")
    arcpy.RemoveJoin_management("islands")

    # Remove any islands where CII_Score_Overall is null (the "> 0" clause does that)
    # Note: this step is handled differently from the others because
    #       CopyFeatures_management() was not dropping the nulls for some reason
    arcpy.Select_analysis("islands_with_score_with_nulls", "islands_with_score", 'islands_with_CII_scores_table_CII_Score_Overall > 0')

    # Delete some unnecessary fields
    drop_fields = ["islands_with_CII_scores_table_OBJECTID","islands_with_CII_scores_table_STRONG",
                  "islands_with_CII_scores_table_COUNT", "islands_with_CII_scores_table_AREA"]
    arcpy.DeleteField_management("islands_with_score", drop_fields)

    # Rename some fields to their alias, to get rid of the exaggeratedly long joined names
    field_list = arcpy.ListFields("islands_with_score")
    for field in field_list:
        print(field.name)
        if field.aliasName in ["STRONG", "Orig_Length", "CII_Score_Overall"]:
            arcpy.AlterField_management("islands_with_score", field.name, field.aliasName)

    # Clean up
    remove_intermediary_layers(["buffered_islands","islands_with_CII_scores_table", "islands_with_score_with_nulls"])
    turn_off_layers(["islands_with_score"])
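compute_CII_per_island() relies on two project helpers that are not shown. A minimal sketch of what they might look like, assuming an ArcGIS Pro session; both bodies are assumptions, not the project's actual helpers:

def remove_intermediary_layers(layer_names):
    # assumed helper: delete intermediate datasets from the workspace
    for name in layer_names:
        if arcpy.Exists(name):
            arcpy.Delete_management(name)

def turn_off_layers(layer_names):
    # assumed helper: hide the named layers in the maps of the current project
    aprx = arcpy.mp.ArcGISProject("CURRENT")
    for m in aprx.listMaps():
        for lyr in m.listLayers():
            if lyr.name in layer_names:
                lyr.visible = False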
Example #3
def select_top_n(input_feature, output_dir, sort_column='AREA', filter_column='dbZ', values=None, top_count=1):
    
    arcpy.env.workspace = "in_memory"
    arcpy.env.overwriteOutput = True
    
    # if dBZ values are not given, use the unique values of the filter column
    if not values:
        with arcpy.da.SearchCursor(input_feature, [filter_column]) as cur:
            cur.reset()
            values = list(set([row[0] for row in cur]))
            
    # Iterate over each level; `AREA` is always present, so it can serve as the default sort column.
    for value in values:
        sub_level_dir = os.path.join(output_dir, str(value))
        if not os.path.exists(sub_level_dir):
            os.makedirs(sub_level_dir)
        with arcpy.da.SearchCursor(input_feature, [sort_column], where_clause="%s=%s" % (filter_column, str(value))) as cur:
            cur.reset()
            sort_field = list(reversed(sorted([row[0] for row in cur])))
            if not sort_field:
                continue
            if len(sort_field) < top_count:
                threshold = sort_field[-1]
            else:
                threshold = sort_field[top_count - 1]
            
        # Now output it
        output_name = arcpy.ValidateTableName(os.path.basename(input_feature) + "_" + filter_column)
        arcpy.Select_analysis(input_feature, os.path.join(sub_level_dir, output_name), where_clause='%s>=%s AND %s=%s' % (sort_column, str(threshold), filter_column, str(value)))
        print("Select top %d in %s -> %s" % (top_count, input_feature, output_name))
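A hedged usage sketch for select_top_n; the paths and dBZ levels are hypothetical:

# keep the 3 largest polygons per dBZ level (paths are hypothetical)
select_top_n(r"C:\radar\storm_cells.shp", r"C:\radar\top_cells",
             sort_column='AREA', filter_column='dbZ',
             values=[30, 40, 50], top_count=3)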
Example #4
def areaatt(clippedparcels, outline, attlyr, muniname, newname, newalias):

    # Take the union - it will retain the info of each unioned piece.
    attname = os.path.basename(os.path.normpath(attlyr))
    clippedparcelname = os.path.basename(os.path.normpath(clippedparcels))
    unionname = 'sites_union_' + attname
    arcpy.Union_analysis([clippedparcels, attlyr], unionname)

    # Find the pieces that overlap BOTH inputs
    exp1 = 'FID_' + clippedparcelname + ' <> -1 AND FID_' + attname + ' <> -1'
    filteredname = 'sites_' + attname + '_filtered'
    arcpy.Select_analysis(unionname, filteredname, exp1)

    # Calculate the area of each piece in acres
    exp = "float(!SHAPE.AREA@ACRES!)"
    arcpy.AddField_management(filteredname, 'areacalc', 'DOUBLE')
    arcpy.CalculateField_management(filteredname, 'areacalc', exp, "PYTHON_9.3")

    # Spatially join back to the original parcels, maintaining "overlap_p"
    newlayer = addatt(clippedparcels, filteredname, muniname, 'areacalc',
                      newname, newalias, method='CONTAINS')

    # Garbage collection!
    arcpy.Delete_management(unionname)
    arcpy.Delete_management(filteredname)

    return newlayer
Example #5
def xy2events2shp():
    now = datetime.datetime.now()
    arcpy.AddMessage('--- Processing Jaskinie XY data into a spatial .shp layer [' + now.strftime("%Y/%m/%d %H:%M:%S") + '] ---')

    inputs = [oracleConnector+"\\JASKINIEPOLSKI.V_JASKINIE"]
    fieldX = ["X_1992"]
    fieldY = ["Y_1992"]
    events = ["jaskinie_events"]
    tempPaths = ["midas.gdb\\jaskinieTemp"]
    targetPaths = ["cbdg_srodowisko_jaskinie_"+today_]
    targetNames = ["cbdg_srodowisko_jaskinie_"+today_+".shp"]

    # coordinate system definition - EPSG 2180 - the .prj file is looked up
    # in the default ArcGIS installation directory
    prjFile = os.path.join(arcpy.GetInstallInfo()["InstallDir"],"Coordinate Systems/Projected Coordinate Systems/National Grids/Europe/ETRS 1989 Poland CS92.prj")
    spatialRef = arcpy.SpatialReference(prjFile)

    i = 0
    for n in inputs:
        createSHPdir(SHPdirectory, targetPaths[i])
        arcpy.MakeXYEventLayer_management(n, fieldX[i], fieldY[i], events[i], spatialRef, "")
        arcpy.FeatureClassToFeatureClass_conversion(events[i], myPath+targetPaths[i], targetNames[i])
        arcpy.Select_analysis(events[i], myPath+tempPaths[i], "")
        arcpy.AddMessage('  --> Exported layer ' + targetNames[i])
        shp_zip(targetPaths[i], myPath+targetPaths[i])
        i += 1
Example #6
def select_analysis(in_features, out_features, SQLClause):
    if not os.path.exists(os.path.dirname(out_features)):
        os.mkdir(os.path.dirname(out_features))
    if os.path.exists(out_features):
        return
    print "processing select analysis.."
    arcpy.Select_analysis(in_features, out_features, SQLClause)
Example #7
def fixCurves(fc):
    arcpy.env.overwriteOutput = True
    print("\tProcessing true curves in {0}... this will take a while to complete"
          .format(fc.name))
    whereOID, cntSource = getCurvy(fc.dataSource, True, False)
    if len(cntSource) == 1:
        whereOID = whereOID.replace(',', '')
    #arcpy.SelectLayerByAttribute_management(fc,"NEW_SELECTION",whereOID)
    #arcpy.CopyFeatures_management(fc,"curvy_" + fc.name.replace(" ","_"))
    arcpy.Select_analysis(fc.dataSource, "curvy_" + fc.name.replace(" ", "_"),
                          whereOID)
    expression, cntCopy = getCurvy(
        scratchWksp + "\\curvy_" + fc.name.replace(" ", "_"), False, False)
    arcpy.Densify_edit(scratchWksp + "\\curvy_" + fc.name.replace(" ", "_"),
                       "ANGLE", "200 Feet", "2 Feet", "10")
    arcpy.FeatureVerticesToPoints_management(
        scratchWksp + "\\curvy_" + fc.name.replace(" ", "_"),
        scratchWksp + "\\curvy_" + fc.name.replace(" ", "_") + "_Pnts", "ALL")
    arcpy.PointsToLine_management(
        scratchWksp + "\\curvy_" + fc.name.replace(" ", "_") + "_Pnts",
        scratchWksp + "\\notCurvy_" + fc.name.replace(" ", "_"), "ORIG_FID")
    whereNot, cntNotCurvy = getCurvy(
        scratchWksp + "\\notCurvy_" + fc.name.replace(" ", "_"), False, False)
    if cntNotCurvy:
        print("Something went horribly wrong! {0}".format(fc.name))
    flds = arcpy.ListFields(fc.dataSource)
    # use a list comprehension; removing list items while looping raises an error
    fldsList = [fld for fld in flds if fld.name not in passFlds]
    # a feature class may contain only passFlds, in which case the script would fail
    if fldsList:
        fldNames = ';'.join(str(f.name) for f in fldsList)
        if getShapeType(fc) == "Polyline":
            arcpy.TransferAttributes_edit(
                scratchWksp + "\curvy_" + fc.name.replace(" ", "_"),
                scratchWksp + "\\notCurvy_" + fc.name.replace(" ", "_"),
                fldNames, "1 Feet", "",
                "attTransfer" + fc.name.replace(" ", "_"))
            if fixTrueCurves:
                # delete coincident lines first due to ArcFM Feeder Manager messages
                # append after delete or ArcFM Feeder Manager will present excessive messages
                arcpy.SelectLayerByAttribute_management(
                    fc, "NEW_SELECTION", whereOID)
                arcpy.DeleteFeatures_management(fc)
                arcpy.Append_management(
                    scratchWksp + "\\notCurvy_" + fc.name.replace(" ", "_"),
                    fc.dataSource, "NO_TEST")
                #pass
            else:
                pass
    print("{0}: {1} Copied: {2} notCurvy: {3}".format(fc.name, len(cntSource),
                                                      len(cntCopy),
                                                      len(cntNotCurvy)))
Example #8
def import_fc_to_sde(fc):
    print(" Processing Import --- %s " % fc)
    log.info("Processing %s for import to APEXWCD SDE Database" % fc)
    sde_fc = sde_fc_format.format(os.path.basename(fc))
    subdivs = r'C:\Users\Jlong\AppData\Roaming\ESRI\Desktop10.5\ArcCatalog\APEXPL.sde\APEXPL.PL.Subdivisions\APEXPL' \
              r'.PL.ApexDevelopment_Residential'
    #arcpy.DeleteField_management(fc, ["OBJECTID"])
    if os.path.basename(fc) == 'Parcels':
        log.info("Processing {} for import to APEXWCD SDE Database".format(
            sde_fc_format.format('WC_ParcelApxClipped')))
        arcpy.MakeFeatureLayer_management(fc, 'wake_lyr')
        arcpy.SelectLayerByLocation_management('wake_lyr', 'INTERSECT',
                                               exp_clipper)
        arcpy.Select_analysis('wake_lyr',
                              sde_fc_format.format('WC_ParcelApxClipped'))
        arcpy.SelectLayerByAttribute_management('wake_lyr', "CLEAR_SELECTION")
        arcpy.Delete_management("wake_lyr")
        log.info("Copied {} to APEXWCD SDE Database".format(
            sde_fc_format.format('WC_ParcelApxClipped')))
    elif os.path.basename(fc) == 'Jurisdictions':
        log.info("Processing %s with CURRENT Date FIELD" %
                 sde_fc_format.format('Jurisdictions'))
        format_time = datetime.date.fromtimestamp(os.path.getmtime(fgdb))
        arcpy.AddField_management(fc, "CURRENT", 'DATE', field_length=64)
        with arcpy.da.UpdateCursor(fc, ["CURRENT"]) as ucur:
            for row in ucur:
                row[0] = format_time
                ucur.updateRow(row)

    arcpy.CopyFeatures_management(fc, sde_fc)
    log.info("Copied {} to APEXWCD SDE Database".format(sde_fc))
    return
Example #9
def create_state_parks():
    """
    Create a shapefile for each state and for each state's combined parks
    :return:
    """
    shp_file_list = glob.glob(
        os.path.join("D:/Dian/United States/State", "*.shp"))
    in_features = os.path.normcase("D:/Dian/United States/Parks (Local).lyr")
    out_put_dir = os.path.normcase("D:/NDVI APRSEP")
    for shp_file in shp_file_list:
        shp_file_name = os.path.split(shp_file)[-1]
        state_name = shp_file_name[:-4]
        out_put_path = os.path.join(out_put_dir, state_name)
        if not os.path.exists(out_put_path):
            os.mkdir(out_put_path)
        temp_out_features = os.path.join(out_put_path, state_name + " ParksOr")
        if not os.path.exists(temp_out_features + ".shp"):
            arcpy.Clip_analysis(in_features, shp_file, temp_out_features)
            # print("%s is produced" % temp_out_features)
        out_features = os.path.join(out_put_path, state_name + " Parks")
        if not os.path.exists(out_features + ".shp"):
            try:
                arcpy.Select_analysis(temp_out_features + ".shp", out_features,
                                      '"AREA" > 1')
            except arcpy.ExecuteError:
                print("%s fails to be produced" % out_features)
                continue
            print("%s is produced" % out_features)
Example #10
def clip_to_regions(regions_shp, project_output, overwrite):
    # make temp and list of management units
    regions = arcpy.CopyFeatures_management(regions_shp, 'in_memory/regions')
    regions_list = [row[0] for row in arcpy.da.SearchCursor(regions, 'NAME')]

    # clip projectwide output to each polygon in regions shapefile and name by region's name
    for region in regions_list:
        print 'Clipping projectwide ' + os.path.basename(
            project_output) + ' to ' + region
        # select shapefile of unit and make temp shapefile
        unit_shp = arcpy.Select_analysis(
            regions, 'in_memory/' + str(region.replace(' ', '')),
            "NAME = '%s'" % str(region))
        region_out = project_output.split('.')[0] + '_' + str(
            region.replace(' ', '')) + '.shp'
        if not os.path.exists(region_out) or overwrite is True:
            try:
                arcpy.Clip_analysis(project_output, unit_shp, region_out)
            except Exception as err:
                print 'Clipping ' + os.path.basename(
                    project_output
                ) + ' failed for ' + region + '. Error thrown was:'
                print err
        else:
            print 'SKIPPING ' + os.path.basename(
                region_out) + ': already exists'
Example #11
def muni_addatts(inparcels, townpolys, muniname, muniname_caps):

    # Create clip boundary
    munioutline = AutoName(muniname + '_outline')
    arcpy.Select_analysis(townpolys, munioutline,
                          "town = '" + muniname_caps + "'")
    # Create a new file of parcels clipped to muni outline
    parcelmuniname = AutoName('clipparcels' + muniname)
    arcpy.AddMessage('Clipping parcels to ' + muniname + ' outline')
    print('Clipping parcels to ' + muniname + ' outline')
    arcpy.Clip_analysis(
        inparcels, munioutline, parcelmuniname
    )  # ORIGINALLY USED "parcels", the global name, not "inparcels", the passed function argument.
    # "parcels" was parcelswithloadvals3. Is this correct? It failed on Walpole.
    # Combine municipal parcels, each of which has one of the desired attributes,
    # into a single feature class with all attributes.
    # Join together based on MAPC assigned ID
    arcpy.AddMessage(
        'Joining in each attribute to parcels based on MAPC parcel ID...')
    commonid = 'mapc_id'
    join_attrblyrs(score1, parcelmuniname, commonid, 'pri_pct')
    if score2: join_attrblyrs(score2, parcelmuniname, commonid, 'pri_pct')
    if score3: join_attrblyrs(score3, parcelmuniname, commonid, 'pri_pct')
    if score4: join_attrblyrs(score4, parcelmuniname, commonid, 'pri_pct')

    arcpy.Delete_management(munioutline)

    return parcelmuniname
Example #12
def select_featureby_size(file_path, lake_size):
    file_dir = os.path.split(file_path)[0]
    feature = os.path.join(file_dir, 'RasterToPolygon_Select.shp')
    where_clause = 'POLY_AREA>=' + str(lake_size)
    print('Extracting lake areas larger than {} km^2'.format(lake_size))
    arcpy.Select_analysis(file_path, feature, where_clause)
    return feature
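The where clause above assumes the input already carries a POLY_AREA field holding areas in square kilometers. If it does not, one way to add it first (an assumption, not part of the example; requires ArcGIS Pro) is:

# assumed precomputation of POLY_AREA in square kilometers
arcpy.management.CalculateGeometryAttributes(
    file_path, [["POLY_AREA", "AREA"]], area_unit="SQUARE_KILOMETERS")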
Example #13
def copyPipesegmenttoGascross():
    """
    Copies the crossing segments in the pipe-segment table, together with their
    related attributes, into the crossing table.
    Basic approach:
    First select from the pipe-segment table the features whose laying mode is a
    crossing; this requires that the laying mode was filled in correctly when the
    pipe-segment table was populated (a crossing segment must have its laying
    mode set to "crossing" or "spanning").
    Then append those features, together with their attributes, to the crossing table.
    """
    # Select the features whose laying mode is crossing (4) or spanning (5)
    arcpy.Select_analysis("T_PN_PIPESEGMENT_GEO","ForCross","LAYMODE = 4 OR LAYMODE = 5")

    # Create the field mappings; PSCODE and PIPESEGNO are renamed from the
    # source fields CODE and NAME, every other field maps to itself
    fieldMappings = arcpy.FieldMappings()
    fieldTuple=("PIPENAME","PSCODE","PIPESEGNO","MSTART","MEND","USEDDATE","REFOBJSTART",\
                "OFFSETSTART","XSTART","YSTART","ZSTART","REFOBJEND","OFFSETEND",\
                "XEND","YEND","ZEND","CONSTRUNIT","SUPERVISORUNIT","TESTUNIT",\
                "FDNAME","INPUTDATETIME","COLLECTUNIT","COLLECTDATE","NAME")
    sourceFields = {"PSCODE": "CODE", "PIPESEGNO": "NAME"}
    for FT in fieldTuple:
        # create a field map and add the source field to it
        fm = arcpy.FieldMap()
        fm.addInputField("ForCross", sourceFields.get(FT, FT))
        # set the output field name
        fm_name = fm.outputField
        fm_name.name = FT
        fm.outputField = fm_name
        # add the field map to the field mappings
        fieldMappings.addFieldMap(fm)

    # Append the selected crossing segments to the crossing table
    arcpy.Append_management("ForCross","T_LP_GASCROSS_GEO","NO_TEST",fieldMappings,"")

    # Delete the intermediate feature class
    arcpy.Delete_management("ForCross")

    # Assign codes to the crossings
    featureCoding("T_LP_GASCROSS_GEO")
Example #14
def Split_Line_By_Vertex(aoi_line):

    Multi_to_single(aoi_line)
    New_Line  = aoi_line + '_Temp'
    save_name = aoi_line

    arcpy.Select_analysis(aoi_line, New_Line, "\"OBJECTID\" < 0")
    iCursor = arcpy.da.InsertCursor(New_Line, ["SHAPE@"])
    with arcpy.da.SearchCursor(aoi_line,["SHAPE@"]) as sCursor:
        for row in sCursor:
            for part in row[0]:
                prevX = None
                prevY = None
                for pnt in part:
                    if pnt:
                        # compare with None so a coordinate of 0 is not skipped
                        if prevX is not None:
                            array = arcpy.Array([arcpy.Point(prevX, prevY),
                                                 arcpy.Point(pnt.X, pnt.Y)])
                            polyline = arcpy.Polyline(array)
                            iCursor.insertRow([polyline])
                        prevX = pnt.X
                        prevY = pnt.Y

    del iCursor

    arcpy.Delete_management(aoi_line)
    arcpy.Rename_management(New_Line, save_name)
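Where an Advanced license is available, the same vertex-to-vertex split is offered by a built-in tool; a hedged one-line alternative (the output name is hypothetical):

arcpy.SplitLine_management(aoi_line, aoi_line + '_split')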
Example #15
def sample_road_points():
    fld_pts = '{}fld_pts_rd_data.shp'.format(gis_proj_dir)
    rd_pts = '{}rd_far_fld.shp'.format(gis_proj_dir)
    fld_pt_df = read_shapefile_attribute_table(fld_pts)
    rd_pts_df = read_shapefile_attribute_table(rd_pts)
    # percentage of road points per VDOT class
    cls = get_rd_classes(rd_pts)
    num_samples = (cls * 750 / 100).round()

    l = []
    for c, n in num_samples.iteritems():
        d = rd_pts_df[rd_pts_df['VDOT'] == c]
        if d.shape[0] > n:
            idx = d.sample(n=int(n)).index
        else:
            print "there are too few points. only {} when it's asking for {} for VDOT {}".format(
                d.shape[0],
                n,
                c
            )
            idx = d.index
        l.append(pd.Series(idx))
    sampled = pd.concat(l)
    out_file_name = 'sampled_road_pts.shp'
    where_clause = '"FID" IN ({})'.format(",".join(map(str, sampled.tolist())))
    print "Select_analysis"
    arcpy.Select_analysis(rd_pts, out_file_name, where_clause)
    return sampled
Example #16
def estimateTotalLineLengthInPolygons(fcLine, fcClipPolygon, polygonIDFieldName, clipPolygonID, geodatabase = "assignment2.gdb"):
    # test for existence of the geodatabase
    if arcpy.Exists(geodatabase):
        # set the workspace to the user input geodatabase
        arcpy.env.workspace = geodatabase
        print("Environment workspace is set to: ", geodatabase)
    else:
        print("Workspace", geodatabase, "does not exist!")
        sys.exit(1)
    # use try to identify errors in the types of data
    try:
        # use the Describe function to determine the element data types
        desc_fcLine = arcpy.Describe(fcLine)
        desc_fcClipPolygon = arcpy.Describe(fcClipPolygon)
    except arcpy.ExecuteError:
        print(arcpy.GetMessages(2))
        sys.exit(1)
    if desc_fcLine.shapeType != "Polyline":
        print("Error shapeType: ", fcLine, "needs to be a polyline type!")
        sys.exit(1)
    if desc_fcClipPolygon.shapeType != "Polygon":
        print("Error name: ", fcClipPolygon, "needs to be a polygon type!")
        sys.exit(1)
    # the line and polygon feature classes must share the same projection
    if desc_fcLine.spatialReference.name != desc_fcClipPolygon.spatialReference.name:
        print("Coordinate system error: Spatial reference of", fcLine, "and", fcClipPolygon, "should be the same.")
        sys.exit(1)
    # identify the input coordinate system's unit of measurement
    if desc_fcLine.spatialReference.linearUnitName != "miles":
        print("Error: coordinate system unit measurement needs to be in miles!")
    # create output files with the same name as existing files by overwriting them
    arcpy.env.overwriteOutput = True
    # list feature classes
    fcList = arcpy.ListFeatureClasses()
    for fc in fcList:
        print(fc)
    # example inputs: fcLine = "river_network", fcClipPolygon = "states",
    # polygonIDFieldName = "Iowa", clipPolygonID = "rivers"
    # an id or name field that uniquely identifies a feature in the polygon feature class
    arcpy.AddField_management(fcClipPolygon, "geoid", "TEXT")
    # create an update cursor for the polygon feature class
    with arcpy.da.UpdateCursor(fcClipPolygon, ["Field1", "geoid"]) as cursor:
        # update geoid using Field1
        for row in cursor:
            field1_list = row[0].split(", ")
            greater_list = field1_list[-1].split("> ")
            geoid_str = ""
            for item in greater_list:
                colon_list = item.split(":")
                geoid_str += colon_list[1]
            row[1] = geoid_str
            cursor.updateRow(row)
    # select the clip polygon by its ID (assumes polygonIDFieldName is a text field)
    clipPolygon = "clipPolygon_sel"
    arcpy.Select_analysis(fcClipPolygon, clipPolygon,
                          "{0} = '{1}'".format(polygonIDFieldName, clipPolygonID))
    # clip the linear feature class by the selected polygon boundary
    clippedLines = fcLine + "_clipped"
    arcpy.Clip_analysis(fcLine, clipPolygon, clippedLines)
    # calculate and return the total length of the line features (e.g., rivers)
    # in miles for the selected polygon
    arcpy.AddField_management(clippedLines, "length", "DOUBLE")
    arcpy.CalculateGeometryAttributes_management(clippedLines, [["length", "LENGTH_GEODESIC"]], "MILES_US")
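A hedged usage sketch; the dataset names, field name, and ID value are hypothetical:

# total geodesic river miles inside the state polygon whose NAME equals 'Iowa'
estimateTotalLineLengthInPolygons("river_network", "states", "NAME", "Iowa",
                                  geodatabase="assignment2.gdb")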
Example #17
def main():

    #  environment settings
    arcpy.env.workspace = 'in_memory'  # set workspace to temporary workspace
    arcpy.env.overwriteOutput = True  # set to overwrite output

    # copy nhd layers we need for brat into temporary workspace
    huc8s = arcpy.CopyFeatures_management(huc8_aoi_path, 'in_memory/huc8s')

    # create list of all huc 8 ids
    huc8s_list = [row[0] for row in arcpy.da.SearchCursor(huc8s, 'HUC8')]

    # for each huc 8 id in the list....
    for huc8 in huc8s_list:
        # compress HUC8 name
        huc8_shp = arcpy.Select_analysis(huc8s, 'in_memory/huc8_' + str(huc8),
                                         "HUC8 = '%s'" % str(huc8))
        huc8_name = str(arcpy.da.SearchCursor(huc8_shp, ['NAME']).next()[0])
        huc8_name_new = re.sub(r'\W+', '', huc8_name)

        # print subdir name to track script progress for user
        print 'Creating subfolder for: ' + str(huc8_name)

        # create HUC8 subfolder if it doesn't already exist
        huc8_folder = os.path.join(out_path, huc8_name_new + '_' + str(huc8))
        if not os.path.exists(huc8_folder):
            os.makedirs(huc8_folder)
Example #18
def grafo2(edificio):

    auxiliar4 = root + "\\AUXILIAR4"
    os.mkdir(auxiliar4)

    outputb = "AUXILIAR2\\" + str(edificio) + "_4.dbf"
    row2s = arcpy.SearchCursor(outputb)
    for row2 in row2s:
        output = "AUXILIAR2\\" + str(edificio) + "_4.shp"
        output2 = "AUXILIAR4\\" + str(row2.FID) + "_1.shp"
        clause = '"FID" = ' + str(row2.FID)
        arcpy.Select_analysis(output, output2, clause)

        output = "AUXILIAR2\\" + str(edificio) + "_2.shp"
        output3 = "AUXILIAR4\\" + str(row2.FID) + "_3.shp"
        arcpy.Merge_management([output2, output], output3)

        output = "AUXILIAR4\\" + str(row2.FID) + "_4.shp"
        arcpy.PointsToLine_management(output3, output)

        output2 = "AUXILIAR2\\" + str(edificio) + "_5.shp"
        output3 = "AUXILIAR4\\" + str(edificio) + "_5.shp"
        if row2.FID == 0:
            arcpy.Copy_management(output, output2)
        else:
            arcpy.Merge_management([output2, output], output3)
            arcpy.Copy_management(output3, output2)
    shutil.rmtree(auxiliar4)
Example #19
def select_analysis(in_features, out_features, sql_clause):
    if path_check(out_features):
        print("{} has already been selected".format(in_features))
        return
    print "Selecting analysis {}".format(in_features)
    arcpy.Select_analysis(in_features, out_features, sql_clause)
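path_check() is not shown in this example; judging from the similar wrapper in Example #6, a plausible sketch (an assumption, not the original helper):

def path_check(out_features):
    # assumed helper: create the parent folder if needed,
    # then report whether the output itself already exists
    parent = os.path.dirname(out_features)
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
    return os.path.exists(out_features)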
Example #20
def makeFeature(in_file, out_name, logFile, filtList):

    try:
        # Create new log file
        if logFile == 'true':
            strRem = ','.join(str(e[0])
                              for e in filtList)  # create csv string of fid's
            log_name = out_name + '_log'
            where = '{0}({1})'.format('"feature_id" IN ', strRem)
            logFile = logFilter(in_file, log_name, where, filtList)
            # Create new FC with 'OK' locations
            where = '{0}({1})'.format('"feature_id" NOT IN ', strRem)
            out_file = arcpy.Select_analysis(in_file, out_name, where)
        else:
            # Create new FC, add filter fields
            out_file = arcpy.Select_analysis(in_file, out_name)
            #out_file = arcpy.CopyFeatures_management(in_file, out_name)
            f_utils.addLogFields(out_file)
            logFields = ["feature_id", "filtername", "filterparam"]
            # Update filter fields
            with arcpy.da.UpdateCursor(out_file, logFields) as uCur:
                for row in uCur:
                    # find the matching feature_id in filtList
                    rowD = [item for item in filtList if item[0] == row[0]]
                    if rowD:  # rowD => [[12345,x,y]]
                        row[1:] = rowD[0][1:]
                    else:
                        row[1:] = ['OK', '']
                    uCur.updateRow(row)

    except arcpy.ExecuteError:
        msgs = arcpy.GetMessages(2)
        arcpy.AddError(msgs)
        print(msgs)

    except:
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(
            sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages(2) + "\n"
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)
        print(pymsg)
        print(msgs)

    return
Example #21
def DefineUnsuitableSlopes():
    # Get NED data
    df = pd.read_csv(ned_list_csv)
    download_urls = list(df['downloadURL'])
    dem_files = []
    for download_url in download_urls:
        # Create output file name variables
        file_name = os.path.split(download_url)[1]
        # print(file_name),
        # print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        new_download = os.path.join(scratch_space, file_name)
        # Download data
        if not os.path.exists(new_download):
            if sys.version_info[0] == 2:
                urllib.urlretrieve(download_url, new_download)
            else:
                urllib.request.urlretrieve(download_url, new_download)
        dem_files.append(new_download)

    # Create unsuitable slopes and contours
    unsuitable_slope_list = []
    contours_list = []
    for in_dem in dem_files:
        print(in_dem),
        print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        outUnsuitSlope = state_gdb + "\\{0}_UnsuitableSlopes".format(
            os.path.split(in_dem)[1][:-4])
        ## Create Unsuitable Slope Files by region
        CreateUnsuitableSlopes(in_dem, outUnsuitSlope)
        unsuitable_slope_list.append(outUnsuitSlope)
        ## Create Contour Files by region
        outContour = "Physical\\Contours_{0}".format(
            os.path.split(in_dem)[1][:-4])
        create_contours(in_dem, outContour, contourInterval)
        contours_list.append(outContour)

    # Subset Unsuitable slope data by county
    for county in county_list:
        merge_list = []
        delete_list = []
        arcpy.Select_analysis(county_fc, phi,
                              "COUNTY_NAME = '{}'".format(county))
        for unsuit_slope in unsuitable_slope_list:
            arcpy.Intersect_analysis([phi, unsuit_slope], unsuit_slope + phi)
            print(
                int(
                    arcpy.GetCount_management(unsuit_slope +
                                              phi).getOutput(0)))
            if int(arcpy.GetCount_management(unsuit_slope +
                                             phi).getOutput(0)) > 0:
                merge_list.append(unsuit_slope + phi)
            delete_list.append(unsuit_slope + phi)
        arcpy.Merge_management(
            merge_list, state_gdb +
            "\\Physical\\{0}_UnsuitableSlopes".format(county.replace(" ", "")))
        for i in delete_list:
            arcpy.Delete_management(i)

    arcpy.Delete_management(phi)
Example #22
def createComplainRasterFeature(SelectSQL,InputComplainFeatures,POIFeatures,FinalResultFeature):
    logging.info("Process: creating "+FinalResultFeature)
    if(arcpy.Exists(FinalResultFeature)):
        arcpy.Delete_management(FinalResultFeature, "FeatureClass")
    rmNo = random.randint(100000000,999999999)
    print rmNo
    # Process: select the complaint features
    print "Process: select"
    logging.info("Process: select")
    FeatureSelect=arcpy.Select_analysis(InputComplainFeatures, "in_memory/FeatureSelect"+repr(rmNo), SelectSQL)
    # Process: point to raster
    print FeatureSelect
    rowSear =  arcpy.SearchCursor(FeatureSelect)
    row = rowSear.next()
    if(row):
        print "Process: point to raster"
        logging.info("Process: point to raster")
        tempEnvironment0 = arcpy.env.extent
        arcpy.env.extent = "115 23 122 29"
        ResultRaster=arcpy.PointToRaster_conversion(FeatureSelect, "OBJECTID", "in_memory/ResultRaster"+repr(rmNo), "COUNT", "NONE", ".0018")
        arcpy.env.extent = tempEnvironment0
        # Process: raster to point
        print "Process: raster to point"
        logging.info("Process: raster to point")
        COMPLAIN_RASTER_POINTS=arcpy.RasterToPoint_conversion(ResultRaster, "in_memory/COMPLAIN_RASTER_POINTS"+repr(rmNo), "VALUE")
        print "Process: spatial join"
        # Process: spatial join
        COMPLAIN_POI_UNION=arcpy.SpatialJoin_analysis(COMPLAIN_RASTER_POINTS, POIFeatures, "in_memory/COMPLAIN_POI_UNION"+repr(rmNo), "JOIN_ONE_TO_ONE", "KEEP_ALL", "","CLOSEST", ".1 DecimalDegrees", "DISTANCE")
        print "Process: point to raster (2)"
        logging.info("Process: point to raster (2)")
        # Process: point to raster (2)
        tempEnvironment0 = arcpy.env.extent
        arcpy.env.extent = "115 23 122 29"
        ResultRaster2=arcpy.PointToRaster_conversion(COMPLAIN_POI_UNION, "OBJECTID", "in_memory/ResultRaster2"+repr(rmNo), "MOST_FREQUENT", "NONE", ".0018")
        arcpy.env.extent = tempEnvironment0
        print "Process: raster to polygon"
        logging.info("Process: raster to polygon")
        # Process: raster to polygon
        ResultFeature=arcpy.RasterToPolygon_conversion(ResultRaster2, "in_memory/ResultFeature"+repr(rmNo), "NO_SIMPLIFY", "VALUE")
        print "Process: spatial join (2)"
        logging.info("Process: spatial join (2)")
        # Process: spatial join (2)
        ResultFeatureZj=arcpy.SpatialJoin_analysis(ResultFeature, COMPLAIN_POI_UNION, "in_memory/ResultFeatureZj"+repr(rmNo), "JOIN_ONE_TO_ONE", "KEEP_ALL", "", "INTERSECT", "", "")
        # Process: spatial join (3)
        arcpy.SpatialJoin_analysis(FeatureSelect, ResultFeatureZj, FinalResultFeature, "JOIN_ONE_TO_ONE", "KEEP_ALL", "", "INTERSECT", "", "")

        
        arcpy.Delete_management(COMPLAIN_POI_UNION)
        arcpy.Delete_management(COMPLAIN_RASTER_POINTS)
        arcpy.Delete_management(ResultRaster)
        arcpy.Delete_management(ResultRaster2)
        arcpy.Delete_management(ResultFeature)
        arcpy.Delete_management(ResultFeatureZj)
        del COMPLAIN_POI_UNION,COMPLAIN_RASTER_POINTS,ResultRaster,ResultRaster2,ResultFeature,ResultFeatureZj
    arcpy.Delete_management(FeatureSelect)
    del FeatureSelect,rowSear
    logging.info("Cleaning up memory~~")
    gc.collect()
Example #23
    def create_layer_ate(self):
        """
        Create the ATE layer (boundaries of settlements) and the Selsovets layer (boundaries of Selsovets) from a .shp layer (rb.shp, coordinate system - geographic CS Pulkovo-1942)
        :param path_to_layer_ate: path to the layer rb.shp in GCS Pulkovo-1942, which contains the boundaries of Selsovets and settlements
        :param name_layer_ate: name of the layer (.shp) which contains the boundaries of Selsovets and settlements
        :return: two layers, ATE and Selsovets, in the database, WGS-84
        """
        shp = r'{0}\{1}1.shp'.format(self.path_to_layer_ate,
                                     self.name_district)
        shp_sk = r'{0}\{1}_sk.shp'.format(self.path_to_layer_ate,
                                          self.name_district)
        shp_city = r'{0}\ATE.shp'.format(self.path_to_layer_ate)
        shp_ss = r'{0}\Selsovets.shp'.format(self.path_to_layer_ate)
        tempEnvironment0 = arcpy.env.outputCoordinateSystem
        arcpy.env.outputCoordinateSystem = sk_42

        arcpy.Select_analysis(
            os.path.join(self.path_to_layer_ate, self.name_layer_ate), shp,
            "\"SOATO\" LIKE '{0}' OR \"SOATO\" LIKE '{1}'".format(
                district_soato[self.name_district][0],
                district_soato[self.name_district][1]))
        arcpy.env.outputCoordinateSystem = tempEnvironment0

        arcpy.Project_management(shp, shp_sk, wgs84, "CK42_to_ITRF2005", sk_42)

        arcpy.Select_analysis(
            shp_sk, shp_city,
            "CATEGORY IN (111, 112, 113, 121, 123, 213, 221, 222, 223, 231, 232, 234, 235, 239)"
        )
        arcpy.Select_analysis(shp_sk, shp_ss, "\"CATEGORY\" = 103")

        for root, dirs, files in os.walk(self.path_to_layer_ate):
            for file in files:
                if file.find('1') > -1 or file.find('_sk') > -1:
                    os.remove('{0}\{1}'.format(self.path_to_layer_ate, file))
        try:
            arcpy.FeatureClassToGeodatabase_conversion(
                "{0};{1}".format(shp_city, shp_ss), self.nameDataSet)
        except arcpy.ExecuteError:
            print "This layer is already in the database"

        # delete the ATE and Selsovets shapefiles
        for root, dirs, files in os.walk(self.path_to_layer_ate):
            for file in files:
                if file.find('ATE') > -1 or file.find('Selsovets') > -1:
                    os.remove('{0}\{1}'.format(self.path_to_layer_ate, file))
Example #24
def select_by_attr(inShp, sql, outShp):
    """
    Select data by attributes and write it to file
    """

    arcpy.Select_analysis(inShp, outShp, sql)

    return outShp
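A hedged usage example; the shapefile names and SQL are hypothetical:

# hypothetical data: export only the residential parcels
res = select_by_attr("parcels.shp", "\"LANDUSE\" = 'RES'", "res_parcels.shp")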
Example #25
def make_basin_shapefile():
    sw_strct = '{}/Stormwater Infrastructure/sw_structures_joined_pipes.shp'.format(gis_proj_dir)
    basin_codes = filter_sw_struc_codes('basin')
    where_clause = '"Structure1" IN (\'{}\')'.format("','".join(map(str, basin_codes)))
    out_file_name = '{}/Stormwater Infrastructure/sw_struct_basins.shp'.format(gis_proj_dir)
    print "Select_analysis"
    arcpy.Select_analysis(sw_strct, out_file_name, where_clause)
    return out_file_name
Example #26
def Calc_Area(lyr, ws, gdb):
    def math_delta_rashum(area_rashum):
        area_rashum = float(area_rashum)
        delta1 = (0.3 * (math.sqrt(area_rashum)) + (0.005 * area_rashum))
        delta2 = (0.8 * (math.sqrt(area_rashum)) + (0.002 * area_rashum))
        return max(delta1, delta2)

    def find_problem(Area_rasum, Shape_area, delta):
        minus = abs(Area_rasum - Shape_area)
        if minus > delta:
            return 'Warning, Delta is too big'
        else:
            return 'Ok'

    cut_bankal = ws + '\\' + 'cut_bankal'
    tazar_copy = ws + '\\' + 'PARCELS_inProc_edit_copy'
    error_polygon = gdb + '\\' + 'Errors_Polygon'

    deleteErrorCode(error_polygon, ["10"])

    feat_name = 'lyr_layer' + str(uuid.uuid4())[::5]
    if arcpy.Exists(cut_bankal):
        arcpy.Delete_management(cut_bankal)

    arcpy.MakeFeatureLayer_management(lyr, feat_name,
                                      "\"LEGAL_AREA\" IS NOT NULL")
    arcpy.SelectLayerByLocation_management(feat_name, "INTERSECT", tazar_copy,
                                           '100 Meters')
    arcpy.Select_analysis(feat_name, cut_bankal)

    fields = [["GAP", "DOUBLE"], ["delta", "DOUBLE"], ["Check", "TEXT"]]
    for i in fields:
        try:
            arcpy.AddField_management(cut_bankal, i[0], i[1])
        except:
            pass

    with arcpy.da.UpdateCursor(
            cut_bankal,
        ["LEGAL_AREA", "SHAPE_Area", "GAP", "delta", "Check"]) as up_cursor:
        for row in up_cursor:
            delta = math_delta_rashum(row[0])
            row[3] = delta
            row[2] = abs(row[1] - row[0]) - delta
            row[4] = find_problem(row[0], row[1], delta)
            up_cursor.updateRow(row)
    del up_cursor

    feat_lyr = 'cut_bankal_del' + str(uuid.uuid4())[::5]
    arcpy.MakeFeatureLayer_management(cut_bankal, feat_lyr, "\"Check\" = 'Ok'")
    arcpy.DeleteFeatures_management(feat_lyr)

    Calc_field_value_error(cut_bankal, error_polygon, "10",
                           ErrorDictionary["10"])
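math_delta_rashum() takes the larger of two empirical tolerance formulas. A quick worked check of the arithmetic (the parcel area is hypothetical):

import math
area = 10000.0  # m^2, hypothetical registered parcel area
delta1 = 0.3 * math.sqrt(area) + 0.005 * area  # 0.3*100 + 50 = 80.0
delta2 = 0.8 * math.sqrt(area) + 0.002 * area  # 0.8*100 + 20 = 100.0
delta = max(delta1, delta2)                    # 100.0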
Example #27
def point_Not_in_bankal_or_moded(parcel_all_final, ws):

    pnt_New = make_polygon_to_point(parcel_all_final)

    Point = r'in_memory' + '\\' + 'Point'
    Pnt_to_del = r'in_memory' + '\\' + 'Point_to_delete'
    error_point = ws + '\\' + "Errors_Point"
    pnt_Old = ws + '\\' + 'PARCEL_NODE_EDIT_copy'
    node_moded = ws + '\\' + 'node_tazar'
    parcel_bankal = ws + '\\' + 'PARCEL_ALL_EDIT_copy'
    parcel_modad = ws + '\\' + 'PARCELS_inProc_edit_copy'

    deleteErrorCode(error_point, ["6"])

    bankal_vertex = make_polygon_to_point(parcel_bankal)
    modad_vertex = make_polygon_to_point(parcel_modad)

    arcpy.MakeFeatureLayer_management(pnt_New, 'pnt_New_lyr')
    arcpy.SelectLayerByLocation_management('pnt_New_lyr', 'INTERSECT',
                                           node_moded, '0.1 Meters',
                                           "NEW_SELECTION", 'INVERT')
    arcpy.Select_analysis('pnt_New_lyr', Point)

    arcpy.MakeFeatureLayer_management(Point, 'Point_lyr')
    arcpy.SelectLayerByLocation_management('Point_lyr', 'INTERSECT', pnt_Old,
                                           '0.1 Meters')
    arcpy.SelectLayerByAttribute_management('Point_lyr', "SWITCH_SELECTION")
    arcpy.Select_analysis('Point_lyr', Pnt_to_del)

    # delete points from the bankal layer that are not in the bankal nodes
    arcpy.MakeFeatureLayer_management(Pnt_to_del, 'Pnt_to_del_lyr')
    arcpy.SelectLayerByLocation_management('Pnt_to_del_lyr', 'INTERSECT',
                                           bankal_vertex, '0.1 Meters',
                                           "NEW_SELECTION")
    arcpy.DeleteFeatures_management('Pnt_to_del_lyr')

    # delete points that lie on the modad tazar but are not among its nodes
    arcpy.MakeFeatureLayer_management(Pnt_to_del, 'Pnt_to_del_lyr2')
    arcpy.SelectLayerByLocation_management('Pnt_to_del_lyr2', 'INTERSECT',
                                           modad_vertex, '0.1 Meters',
                                           "NEW_SELECTION")
    arcpy.DeleteFeatures_management('Pnt_to_del_lyr2')

    Calc_field_value_error(Pnt_to_del, error_point, "6", ErrorDictionary["6"])
    arcpy.Delete_management(bankal_vertex)
Example #28
def selectPolygonsFromOriginalData(featureClass, stringListOfIDs, outputName,
                                   workspace):
    '''Using values from the Unique_ID field, performs
    an ESRI Select analysis: the matching features are selected
    and exported as a new file.
    '''
    outPutPath = os.path.join(workspace, outputName)
    arcpy.Select_analysis(featureClass, outPutPath,
                          'ADS_OBJECTID IN ({})'.format(stringListOfIDs))
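A hedged usage sketch; the feature class, ID values, and workspace are hypothetical:

# hypothetical IDs joined into the comma-separated string the IN clause expects
ids = ",".join(str(i) for i in [101, 102, 107])
selectPolygonsFromOriginalData("ads_damage_polys", ids, "ads_subset",
                               r"C:\work\ads.gdb")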
Example #29
def select_features_by_season(input_features, selected_seasons, output_name):
    start = time.time()
    if len(selected_seasons) > 1:
        qry = """time IN {0}""".format(str(tuple(selected_seasons)))
    else:
        qry = "time = '" + str(selected_seasons[0]) + "'"
    arcpy.Select_analysis(input_features, output_name, qry)
    end = time.time()
    print("Time for select_features_by_season: " + str(end - start))
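A hedged usage example; the dataset and season values are hypothetical:

# keep only the spring and summer observations
select_features_by_season("observations", ["spring", "summer"], "obs_warm")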
Example #30
def fc_stats():
    outSheet = outWorkbook.add_worksheet(fc[0:30])
    outSheet.set_column(0, 4, 15)
    totalRows = arcpy.GetCount_management(fc)
    spatialRef = arcpy.Describe(fc).spatialReference
    fields = arcpy.ListFields(fc)
    stats_fields = []
    out_geom = "memory" + "\\" + str(fc) + "_" + "geom"
    arcpy.management.CheckGeometry(fc, out_geom)
    totalGeom = arcpy.management.GetCount(out_geom)
    output = "memory" + "\\" + str(fc)
    outSheet.write(0, 0, "NAME")
    outSheet.write(0, 1, fc)
    outSheet.write(1, 0, "TYPE")
    outSheet.write(1, 1, "Feature Class")
    outSheet.write(2, 0, "GCS name")
    outSheet.write(2, 1, spatialRef.name)
    outSheet.write(3, 0, "GCS type")
    outSheet.write(3, 1, spatialRef.type)
    outSheet.write(4, 0, "ROWS")
    outSheet.write(4, 1, int(str(totalRows)))
    outSheet.write(5, 0, "FIELDS")
    outSheet.write(5, 1, int(str(len(fields))))
    outSheet.write(6, 0, "GEOM ERROR")
    outSheet.write(6, 1, int(str(totalGeom)))
    outSheet.write(8, 0, "FIELD")
    outSheet.write(8, 1, "ALIAS")
    outSheet.write(8, 2, "TYPE")
    outSheet.write(8, 3, "COUNT NULL")
    outSheet.write(8, 4, "COUNT BLANK")
    arcpy.management.Delete(out_geom)
    for field in fields:
        if field.type not in ("OID", "Geometry"):
            outSheet.write(fields.index(field) + 7, 0, field.name)
            outSheet.write(fields.index(field) + 7, 1, field.aliasName)
            outSheet.write(fields.index(field) + 7, 2, field.type)
            stats_fields.append([field.name, "COUNT"])
        if field.type not in ("OID", "Geometry", "Double", "Integer",
                              "SmallInteger", "Single"):
            out_fc = "memory" + "\\" + str(fc) + "_" + str(field.name)
            expression = str(field.name) + ' IN (\'\', \' \')'
            arcpy.Select_analysis(fc, out_fc, expression)
            totalBlank = arcpy.GetCount_management(out_fc)
            if int(str(totalBlank)) > 0:
                outSheet.write(
                    fields.index(field) + 7, 4, int(str(totalBlank)))
            arcpy.management.Delete(out_fc)
    arcpy.Statistics_analysis(fc, output, stats_fields)
    fieldsOutput = arcpy.ListFields(output)
    for field in fieldsOutput:
        with SearchCursor(output, [field.name]) as cursor:
            for row in cursor:
                if fieldsOutput.index(field) > 1:
                    outSheet.write(
                        fieldsOutput.index(field) + 7, 3,
                        int(totalRows[0]) - row[0])
    arcpy.management.Delete(output)
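fc_stats() reads the module-level names fc, outWorkbook, and SearchCursor instead of taking parameters. A minimal driver sketch, assuming xlsxwriter provides the workbook and SearchCursor is imported from arcpy.da (the workspace path is hypothetical):

import arcpy
import xlsxwriter
from arcpy.da import SearchCursor

outWorkbook = xlsxwriter.Workbook("fc_stats.xlsx")
arcpy.env.workspace = r"C:\data\inventory.gdb"  # hypothetical workspace
for fc in arcpy.ListFeatureClasses():
    fc_stats()
outWorkbook.close()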