# Example no. 1 (score: 0) — scraped example separator
def ListWorkspaceContentsAndMetadata(workspace):
    """Generator that lists the contents of a geodatabase, including items inside
    feature datasets, yielding one row per feature class or table.

    Each yielded row is ``[owner, tableName, fdsName, dtype]`` optionally
    extended with metadata items from ``GetMetadataItems``. Only geodatabases
    are supported, not folder workspaces.

    Args:
        workspace: Path to a geodatabase workspace or a feature dataset in one.

    Raises:
        ValueError: If the workspace does not exist or is not a geodatabase.
    """

    if not arcpy.Exists(workspace):
        raise ValueError("Workspace %s does not exist!" % workspace)

    desc = arcpy.Describe(workspace)

    # Validate first: only geodatabase workspaces or feature datasets qualify.
    if desc.dataType not in ('Workspace', 'FeatureDataset'):
        if not hasattr(desc, "workspaceType") or desc.workspaceType not in (
                "LocalDatabase", "RemoteDatabase"):
            raise ValueError("Workspace %s is not a geodatabase!" % workspace)

    # Compute the validation workspace and dataset short name exactly once
    # (the original repeated this block twice, verbatim).
    if desc.dataType == 'FeatureDataset':
        validationWorkspace = os.path.dirname(workspace)
        # Short name of the feature dataset (sans database/owner name).
        fdsName = arcpy.ParseTableName(
            desc.name, validationWorkspace).split(",")[2].strip()
    else:
        validationWorkspace = workspace
        fdsName = ""

    for child in desc.children:
        # Parse the full table name into database, owner, table name;
        # "(null)" components are normalized to empty strings.
        database, owner, tableName = [
            i.strip() if i.strip() != "(null)" else "" for i in
            arcpy.ParseTableName(child.name, validationWorkspace).split(",")
        ]
        datasetType = getattr(child, "datasetType", "")
        dataShape = getattr(child, "shapeType", "")

        if datasetType in ('FeatureClass', 'Table'):
            # Feature classes report their shape type; plain tables report 'Table'.
            dtype = dataShape if datasetType == 'FeatureClass' else datasetType
            outrow = [xstr(owner), xstr(tableName), xstr(fdsName), xstr(dtype)]
            try:
                outrow.extend(GetMetadataItems(child.catalogPath))
            except Exception:
                # Metadata is best-effort: a row without metadata is still useful.
                pass
            yield outrow

        # Recurse to get the contents of feature datasets.
        if datasetType == 'FeatureDataset':
            for outrow in ListWorkspaceContentsAndMetadata(child.catalogPath):
                yield outrow
# Example no. 2 (score: 0) — scraped example separator
    def set_delimiters(self):
        """Determine and set SQL field delimiters for ``self.data_location``.

        Uses ``arcpy.ParseTableName`` to classify the data format (mdb, gdb,
        shp, sqlite, in_memory, sde) and assigns ``self.delim_open`` and
        ``self.delim_close`` from the module-level ``delims_open`` /
        ``delims_close`` tables (empty strings for sde/DBMS data).

        Returns:
            bool: True if delimiters were set, False if the format could not
            be determined or is unsupported.
        """

        processing_log.info("Setting delimiters", )

        try:
            fc_info = arcpy.ParseTableName(self.data_location)
            database, owner, featureclass = fc_info.split(",")
        except Exception:  # narrowed from a bare except: don't trap SystemExit/KeyboardInterrupt
            processing_log.error("Failed to assess data format")
            return False

        processing_log.info("Type from ParseTableName = %s" % featureclass)

        # Table-driven replacement for the original if/elif chain.  For each
        # format, the prefix is tried anchored (re.match) and each substring
        # pattern unanchored (re.search).  Regexes are raw strings: "\."
        # in a plain literal is an invalid escape sequence on modern Python.
        known_formats = (
            ("mdb", " mdb", (r"\.mdb",)),
            ("gdb", " gdb", (r"\.gdb",)),
            ("shp", " shp", (r"\.shp",)),
            ("sqlite", " sqlite", (r"\.db", r"\.sqlite")),
            # DBMSes use no delimiters. This is just a guess at how to detect
            # if an fc is in one since I don't have access yet.
            ("in_memory", " in_memory", ("in_memory",)),
        )
        for key, prefix, substrings in known_formats:
            if re.match(prefix, featureclass) is not None or any(
                    re.search(pattern, featureclass) is not None
                    for pattern in substrings):
                self.delim_open = delims_open[key]
                self.delim_close = delims_close[key]
                return True

        if re.match(" sde", featureclass) is not None:
            # dbmses use no delimeters. This is just a guess at how to detect
            # if an fc is in one since I don't have access yet.
            self.delim_open = ""
            self.delim_close = ""
            return True

        processing_log.warning(
            "No field delimiters for this type of data. We can select features in gdbs, mdbs, shps, in_memory, and possibly sde files (untested)"
        )
        return False
#coding=utf-8
import arcpy
arcpy.env.workspace = r'C:\Data'
fc = r'C:\Data\result.shp'
# ParseTableName returns one string of the form "database, owner, table".
fullname = arcpy.ParseTableName(fc)
# Bug fix: split on commas, not whitespace — the three components are
# comma-separated (a whitespace split would mis-partition the result).
namelist = [part.strip() for part in fullname.split(",")]
databasename = namelist[0]
ownername = namelist[1]
fcname = namelist[2]
print(databasename)
print(ownername)
print(fcname)
# Example no. 4 (score: 0) — scraped example separator
def Execute(self, parameters, messages):
    """Tool entry point: build an area-frequency table for training points over a raster.

    Reads five tool parameters (point features, raster, value field, unit
    area, output table), extracts raster values at the training points,
    tallies value frequencies, and writes an output table with cumulative
    area/site fields plus an efficiency (AUC) measure.

    NOTE(review): this function uses a geoprocessor object ``gp`` that is not
    defined here — presumably a module-level ArcGIS geoprocessor; verify.
    It also uses Python 2 idioms (``iterator.next()``, ``sys.exc_type``) and
    will not run unmodified on Python 3.
    """
    import arcsdm.sdmvalues
    try:
        importlib.reload(arcsdm.sdmvalues)
    except:
        reload(arcsdm.sdmvalues)
    try:
        Input_point_features = parameters[0].valueAsText
        Input_raster = parameters[1].valueAsText  #gp.GetParameterAsText(1)
        Value_field = parameters[2].valueAsText  #gp.GetParameterAsText(2)
        UnitArea = parameters[3].value  #gp.GetParameter(3)
        Output_Table = parameters[4].valueAsText  #gp.GetParameterAsText(4)
        arcsdm.sdmvalues.appendSDMValues(gp, UnitArea, Input_point_features)
        arcpy.AddMessage("\n" + "=" * 10 + " Starting area frequency " +
                         "=" * 10)

        #Some locals
        valuetypes = {1: 'Integer', 2: 'Float'}
        joinRastername = None
        Input_table = None
        RasterValue_field = Value_field.title().endswith('Value')
        #if (RasterValue_field):
        #    arcpy.AddMessage("Debug: There is rastervaluefield");
        #else:
        #    arcpy.AddMessage("Debug: There is no rastervaluefield");

        #Create Output Raster
        valuetype = gp.GetRasterProperties(Input_raster, 'VALUETYPE')
        gp.addmessage("Valuetype = " + str(valuetype))
        #gp.addmessage("Value type: %s"%valuetypes[valuetype])
        ##if valuetypes[valuetype].title() == 'Integer': #INTEGER #RDB
        if valuetype <= 8:  #<RDB new integer valuetype property values for arcgis version 10
            if not Input_table:
                if not RasterValue_field:
                    #Create a float raster from a floating attribute field
                    float_type = ('Double', 'Single')
                    fld = gp.listfields(Input_raster, Value_field).next()
                    Value_field_type = fld.type
                    if Value_field_type.title() in float_type:
                        Value_field = Value_field.split('.')
                        if len(Value_field) > 1:
                            gp.adderror("Integer Raster has joined table.")
                            #raise RunTimeError("Integer Raster has joined table.")
                            raise UserException
                        InExpression = "FLOAT(%s.%s)" % (Input_raster,
                                                         Value_field[0])
                        #gp.addwarning(InExpression)
                        TmpRaster = gp.createscratchname(
                            "tmp_AFT_ras", "", "raster", gp.scratchworkspace)
                        gp.SingleOutputMapAlgebra_sa(InExpression, TmpRaster)
                        gp.addmessage(
                            "Floating Raster from Raster Attribute: type %s" %
                            gp.describe(Input_raster).pixeltype)
                    else:
                        gp.adderror(
                            "Integer Raster Attribute field not floating type."
                        )
                        #raise RunTimeError("Integer Raster Attribute field not floating type.")
                        raise UserException
                else:
                    #Create a float raster from the Value field
                    gp.adderror("Integer Raster Value field not acceptable.")
                    #raise
                    #raise RunTimeError ("Integer Raster Value field not acceptable")
                    #raise arcpy.ExecuteError ("Integer Raster Value field not acceptable")
                    raise UserException(
                        "Integer Raster Value field not acceptable")

            Input_raster = TmpRaster  #The input raster is now the new float raster
            valuetype = 2  # Always a floating point raster
        else:  #FLOAT
            gp.addmessage(
                "Floating Raster from Floating Raster Value: type %s" %
                gp.describe(Input_raster).pixeltype)

        # Process: Extract Values of Input Raster to Training Points...
        #gp.AddMessage("tpcnt = %i"%gp.GetCount_management(Input_point_features))
        if gp.GetCount_management(Input_point_features) == 0:
            gp.AddError("Training Points must be selected: %s" %
                        Input_point_features)
            raise UserException
        #gp.AddMessage('Extracting values to points...')
        #Output_point_features = gp.createuniquename("Extract_Train.shp", gp.ScratchWorkspace)
        #gp.ExtractValuesToPoints_sa(Input_point_features, Input_raster, Output_point_features)
        Output_point_features = arcsdm.workarounds_93.ExtractValuesToPoints(
            gp, Input_raster, Input_point_features, "TPFID")

        # Process: Summary Statistics...
        #Get stats of RASTERVALU field in training sites features with extracted points.
        #gp.AddMessage('Getting statistics...')

        #TODO: IF GDB, no .dbf if other - .dbf
        #Output_summary_stats = gp.createuniquename("Ext_Trn_Stats.dbf", gp.scratchworkspace)
        Output_summary_stats = gp.createuniquename("Ext_Trn_Stats",
                                                   gp.scratchworkspace)

        stats_dict = {}
        #gp.addwarning('Got stats...')
        #Get all VALUES from input raster, add to stats_dict dictionary
        #from floatingrasterclass import FloatRasterVAT, rowgen
        flt_ras = FloatRasterVAT(gp, Input_raster)

        rows = flt_ras.FloatRasterSearchcursor()
        gp.Statistics_analysis(Output_point_features, Output_summary_stats,
                               "RASTERVALU FIRST", "RASTERVALU")

        # Seed every raster value with frequency 0, then overwrite with the
        # observed frequencies from the statistics table below.
        for row in rows:
            stats_dict[row.value] = 0
        num_training_sites = gp.getcount(Output_point_features)
        #gp.addwarning('num_training_sites: %s'%num_training_sites)
        #Get frequency of RASTERVALU in training points extracted values.
        statsrows = rowgen(gp.SearchCursor(Output_summary_stats))

        num_nodata = 0
        for row in statsrows:
            #Get actual raster value from truncated value in Extracted values of point theme.
            #gp.addwarning( 'row.RASTERVALU: %s'%row.RASTERVALU)
            if row.RASTERVALU == flt_ras.getNODATA():
                num_nodata = row.FREQUENCY
            else:
                #NODATA value not included in table
                rasval = flt_ras[row.RASTERVALU]
                #Update stats dictionary with occurence frequencies in Statistics table
                if rasval in stats_dict: stats_dict[rasval] = row.FREQUENCY
        #gp.addwarning("Created stats_dict: %s"%stats_dict)

        num_counts = sum(stats_dict.values())
        if num_counts != num_training_sites - num_nodata:
            gp.addwarning(
                "Stats count and number of training sites in data area do not compare."
            )
        if num_nodata > 0:
            gp.addwarning("%d training points in NoData area." % num_nodata)
        #gp.AddMessage(Output_summary_stats);
        #raise
        gp.AddMessage('Creating table: %s' % Output_Table)
        # NOTE(review): splitting on ", " assumes ParseTableName inserts a
        # space after each comma — verify against the target arcpy version.
        fullname = arcpy.ParseTableName(Output_Table)
        database, owner, table = fullname.split(", ")
        gp.AddMessage('Output workspace: %s' % os.path.dirname(Output_Table))

        gp.AddMessage('Output table name: %s' % os.path.basename(Output_Table))
        gp.CreateTable_management(os.path.dirname(Output_Table),
                                  os.path.basename(Output_Table))
        #gp.AddMessage("Created output table.")
        gp.MakeTableView(Output_Table, 'output_table')

        gp.AddField_management('output_table', "Frequency", "LONG", "", "", "",
                               "", "NULLABLE", "NON_REQUIRED", "")
        #Precision and scale of RASTERVALU field must be same as field of that name in extract and statistics tables
        gp.AddField_management('output_table', "RASTERVALU", "DOUBLE", "18",
                               "8", "", "", "NULLABLE", "NON_REQUIRED", "")

        # Process: Add Field (2)...

        # Process: Add Field (3)...
        gp.AddField_management('output_table', "Area_sqkm", "DOUBLE", "", "",
                               "", "Area Sq Kilometers", "NULLABLE",
                               "NON_REQUIRED", "")

        # Process: Add Field (5)...
        gp.AddField_management('output_table', "CAPP_CumAr", "DOUBLE", "", "",
                               "", "CAPP Cumulative Area", "NULLABLE",
                               "NON_REQUIRED", "")
        #gp.AddMessage("got to here....9")

        # Process: Add Field (6)...
        gp.AddField_management('output_table', "Eff_CumAre", "DOUBLE", "", "",
                               "", "Efficiency Cumulative Area", "NULLABLE",
                               "NON_REQUIRED", "")
        #gp.AddMessage("got to here....10")

        # Process: Add Field (7)...
        gp.AddField_management('output_table', "Cum_Sites", "DOUBLE", "", "",
                               "", "Cumulative Sites", "NULLABLE",
                               "NON_REQUIRED", "")

        # Process: Add Field (7)...
        gp.AddField_management('output_table', "I_CumSites", "DOUBLE", "", "",
                               "", "100-Cumulative Sites", "NULLABLE",
                               "NON_REQUIRED", "")

        # Process: Add Field (7)...
        gp.AddField_management('output_table', "Eff_AUC", "DOUBLE", "", "", "",
                               "A U C", "NULLABLE", "NON_REQUIRED", "")

        #gp.AddMessage("Created output table and added fields.")

        gp.DeleteField_management(Output_Table, "Field1")

        #Calculate Count, Area, and Percent fields
        #gp.AddMessage("got to here....11")
        #gp.AddWarning('Assume cell size units is meters!')
        # Converts cell count to area in UnitArea multiples (sq m -> sq km -> units).
        factor = (float(gp.CellSize)**2) / 1000000 / UnitArea
        #gp.AddMessage(str(factor))
        #gp.AddMessage("Input_raster path=%s"%os.path.basename(Input_raster))

        #Search raster must be a raster layer
        #gp.addmessage("Value type: %s"%valuetypes[valuetype])
        rasrows = flt_ras.FloatRasterSearchcursor()
        #gp.addmessage('Opened Float Raster Searchcursor...')

        ##Insert some field values in output table
        #Open insert cursor for output
        tblrows = gp.InsertCursor(Output_Table)
        #Create Output records
        for rasrow in rasrows:
            tblrow = tblrows.NewRow()
            tblrow.RASTERVALU = rasrow.Value
            #gp.addwarning(str(rasrow.Value))
            tblrow.Area_sqkm = rasrow.Count * factor
            tblrows.InsertRow(tblrow)

        del tblrow, tblrows
        #gp.AddMessage("No. records in output table %s: %i"%(Output_Table,gp.GetCount_management(Output_Table)))

        #Get total sites from stats table
        totalsites = sum(stats_dict.values())

        #Variables for more stuff
        totalarea = 0.0
        cumArea = 0
        effarea = []
        nSites = []

        #Update Frequency field and get two summations and create two lists
        #gp.addmessage('statvals: '+str(stats_dict.keys()))
        #gp.AddMessage("Calculating Frequency field...")
        tblrows = gp.UpdateCursor(Output_Table)
        tblrow = tblrows.Next()
        stats_found = 0
        while tblrow:
            tblval = tblrow.RASTERVALU
            area = tblrow.Area_sqkm
            totalarea += area
            #tblval is less precision than rasval
            rasval = flt_ras[tblval]
            if rasval in stats_dict:
                frequency = stats_dict[rasval]
                #gp.AddMessage("Found tblval = %s; frequency = %s"%(tblval,frequency))
                tblrow.Frequency = frequency
                tblrows.UpdateRow(tblrow)
                effarea.append(area)
                nSites.append(frequency)
                stats_found += 1
                #gp.AddMessage("Debug: Stats_found =  %s"%stats_found);

            tblrow = tblrows.Next()

        del tblrow, tblrows

        #Check that output table is consistent with statistics
        if stats_found < len(stats_dict):
            gp.adderror('Not enough Values with Frequency > 0 found!')
            assert False
        elif stats_found > len(stats_dict):
            gp.adderror('Too many Values with Frequency > 0 found!')
            assert False
        else:
            pass
            #gp.addmessage('All Values with Frequency > 0 found')

        #From two reversed lists, create two lists and two cumulative summations
        #gp.AddMessage("Calculating CAPP_CumAre,Eff_CumAre,Cum_Sites,I_CumSites fields...")
        # NOTE(review): ``.next()`` is the Python 2 iterator protocol; on
        # Python 3 this would need next(iterator).
        effarea_rev = reversed(effarea)  #generator
        nSites_rev = reversed(nSites)  #generator
        effCumarea = 0
        cumSites = 0
        effCumareaList = []
        cumSitesList = []
        for i in range(len(nSites)):
            effCumarea += 100.0 * effarea_rev.next() / totalarea
            effCumareaList.append(effCumarea)
            cumSites += 100.0 * nSites_rev.next() / totalsites
            cumSitesList.append(cumSites)

        #Update four fields from reversed lists
        effCumareaList_rev = reversed(effCumareaList)  #generator
        cumSitesList_rev = reversed(cumSitesList)  #generator
        #gp.AddMessage('doing update....')
        tblrows = gp.UpdateCursor(Output_Table)
        #gp.AddMessage(str(tblrows))
        tblrow = tblrows.Next()
        while tblrow:
            #gp.AddMessage(str(tblrow) + str(i))
            cumArea += 100.0 * tblrow.Area_sqkm / totalarea
            tblrow.CAPP_CumAr = cumArea
            tblrow.Eff_CumAre = effCumareaList_rev.next()
            Cum_Sites = cumSitesList_rev.next()
            tblrow.Cum_Sites = Cum_Sites
            tblrow.SetValue('I_CumSites', 100.0 - Cum_Sites)
            tblrows.UpdateRow(tblrow)
            tblrow = tblrows.Next()
        #gp.addmessage('done.')
        del tblrow, tblrows

        #Create two more lists
        #gp.AddMessage("Calculating Eff_AUC field...")
        Eff_CumAre = []
        Cum_Sites = []
        tblrows2 = gp.SearchCursor(Output_Table)
        # Second Next() skips the first record: the AUC below pairs each row
        # with the following row's values (trapezoid rule).
        tblrow2 = tblrows2.Next()
        tblrow2 = tblrows2.Next()
        while tblrow2:
            Eff_CumAre.append(tblrow2.Eff_CumAre)
            Cum_Sites.append(tblrow2.Cum_Sites)
            tblrow2 = tblrows2.Next()

        #Finally, calculate the Eff_AUC field from two lists and Efficiency
        sumEff_AUC = 0.0
        tblrows1 = gp.UpdateCursor(Output_Table)
        tblrow1 = tblrows1.Next()
        for i in range(len(Eff_CumAre)):
            val = 0.5 * (tblrow1.Eff_CumAre - Eff_CumAre[i]) * (
                tblrow1.Cum_Sites + Cum_Sites[i]) / (100.0 * 100.0)
            sumEff_AUC += val
            #gp.AddMessage(str(val))
            tblrow1.Eff_AUC = val
            tblrows1.UpdateRow(tblrow1)
            tblrow1 = tblrows1.Next()
        #Calculate last row
        if tblrow1:
            #gp.AddMessage("Calculating last row...")
            val = 0.5 * (tblrow1.Eff_CumAre) * (tblrow1.Cum_Sites) / (100.0 *
                                                                      100.0)
            sumEff_AUC += val
            tblrow1.Eff_AUC = val
            #gp.AddMessage(str(val))
            tblrows1.UpdateRow(tblrow1)
        del tblrow1, tblrows1

        gp.addmessage('Efficiency: %.1f%%' % (sumEff_AUC * 100.0))

        if Input_table and joinRastername:  #In case of joined integer raster and table
            gp.RemoveJoin_management(joinRastername, Input_table)

    except UserException:
        print('User exception caught. ')

    except arcpy.ExecuteError:
        #TODO: Clean up all these execute errors in final version
        gp.AddMessage("AreaFrequency caught: arcpy.ExecuteError")
        gp.AddMessage("-------------- END EXECUTION ---------------")
        raise
    except:
        #In case of joined integer raster and table
        arcpy.AddMessage("Tsip")
        if Input_table and joinRastername:
            gp.RemoveJoin_management(joinRastername, Input_table)
    # get the traceback object
        tb = sys.exc_info()[2]
        # tbinfo contains the line number that the code failed on and the code from that line
        tbinfo = traceback.format_tb(tb)[0]
        # concatenate information together concerning the error into a message string
        # NOTE(review): sys.exc_type / sys.exc_value are Python 2 only.
        pymsg = "PYTHON ERRORS:\nTraceback Info:\n" + tbinfo + "\nError Info:\n    " + \
            str(sys.exc_type)+ ": " + str(sys.exc_value) + "\n"
        # generate a message string for any geoprocessing tool errors
        msgs = "GP ERRORS:\n" + gp.GetMessages(2) + "\n"
        gp.AddError(msgs)

        # return gp messages for use with a script tool
        gp.AddError(pymsg)

        # print messages for use in Python/PythonWin
        print(pymsg)
        print(msgs)
        raise
def make_workspace_copy(inputfeatures, theworkspace, dotopologycheck,
                        dosimplify, dosimplify_method, dosimplify_tolerance,
                        thefield):
    """This function tests the input features for the topology error 'Must Be Single Part',
    and returns the Origin Feature's Object ID of the errant features to the calling module. Beware:
    the origing feature's object ID is that of the COPY of the input features. The object ID's of the copy
    may be different from the source "inputfeautures"!!!!. This is why the function passes back the name of the COPY so that the processing can
    continue on that feature class where the topologically errant features will be correctly identified
    by the values in the export topology errors geoprocessing tool.

    Args:
        inputfeatures: Path to the input line feature class.
        theworkspace: Geodatabase workspace in which the scratch feature
            dataset is (re)created.
        dotopologycheck: Whether to run the topology check; compared against
            the string 'true' (script-tool boolean convention).
        dosimplify: NOTE(review): accepted but never used in this function.
        dosimplify_method: Method passed to SimplifyLine_cartography.
        dosimplify_tolerance: Tolerance passed to SimplifyLine_cartography.
        thefield: Field used by make_copies_of_features to retain original IDs.

    Returns:
        (badfids, simplified_featureclass): a set of OriginObjectID values of
        topologically errant features (empty if the check was skipped), and
        the path of the simplified copy of the input features.

    NOTE(review): uses Python 2 ``print`` statements — Python 2 only as written.
    """

    ##    arcpy.AddMessage("funcs.make_workspace_copy")

    #Process the
    #roads with the simplify_line tool with the point_remove option at a tolerance of 0.001 meters so that redundant vertices on staight lines are removed.
    #If the user specifies their own parameters for simplify_line, THAT ARE NOT POINT_REMOVE AND THE TOLERANCE IS > 0.001 METERS, that is done additionally,
    #afterwards:

    #this section makes the feature class datasets, feature class names, and topology name:
    badfids = set()
    fdname = "KDOT_Topology_Check"  #the feature dataset name for the topology check
    fdnamepath = theworkspace + "\\" + fdname  #the complete pathname of the feature dataset
    tpname = "CheckTopology"  #the name of the topology
    topology_name = fdnamepath + "\\" + tpname  #the complete pathname of the topology
    ##    arcpy.AddMessage("make_workspace_copy, fdnamepath: "+fdnamepath)
    ##    arcpy.AddMessage("make_workspace_copy, topology_name: "+topology_name)
    fcname = arcpy.ParseTableName(
        inputfeatures,
        theworkspace)  #Split the inputfeatures to find the name from the path.
    namelist = fcname.split(
        ", "
    )  #the feature class name without the path. Used in creating a copy in the feature dataset.
    ##    arcpy.AddMessage('fcname = '+ namelist[2])
    topology_featureclass = fdnamepath + '\\' + namelist[
        2] + '_check'  #the copy of inputfeatures used for the topology check
    topology_featureclass_errors = namelist[
        2] + '_errors'  # the basename used for the export topology errors tool
    ##    arcpy.AddMessage(topology_featureclass)
    topology_featureclass_errors_line = fdnamepath + '\\' + namelist[
        2] + '_errors_line'  #the output name of LINE errors from the export topology errors tool

    #Delete if the feature dataset currently exists:
    doesexistfd = arcpy.Exists(fdnamepath)
    try:
        if doesexistfd:
            arcpy.AddMessage(
                'Previous topology check feature dataset exists. Now deleteing '
            )
            arcpy.Delete_management(fdnamepath)
    except arcpy.ExecuteError:
        print arcpy.GetMessages(2)
    except Exception as e:
        print e.args[0]

    #Re-create the topology feature dataset:
    arcpy.AddMessage('Generating the topology check scratch feature dataset')
    # The input features supply the spatial reference for the new dataset.
    arcpy.CreateFeatureDataset_management(theworkspace, fdname, inputfeatures)

    #Make a copy of the input roads in the feature dataset that contains the topology:
    try:
        arcpy.AddMessage(
            'Generating a copy of the input feature class in the scratch feature dataset'
        )
        #This replaces the function "arcpy.CopyFeatures_management" so that we can retain the original FID:
        ##        make_copies_of_features(inputfeatures,  topology_featureclass, "Original_OID")
        make_copies_of_features(inputfeatures, topology_featureclass, thefield)
##        arcpy.CopyFeatures_management(inputfeatures, topology_featureclass)
    except arcpy.ExecuteError:
        print arcpy.GetMessages(2)
    except Exception as e:
        print e.args[0]

    #Perform the topology check, if checked ON in input parameters:
##    arcpy.AddMessage('make_workspace_copy, dotopology = ' + str(dotopologycheck))
##    if(dotopologycheck == True):
    if (str(dotopologycheck) == 'true'):
        arcpy.AddMessage('Creating the topology')
        arcpy.CreateTopology_management(fdnamepath, tpname)

        #Add the input roads to the topology
        arcpy.AddMessage(
            'Adding the copy of the input features to the topology')
        arcpy.AddFeatureClassToTopology_management(topology_name,
                                                   topology_featureclass, 1, 1)
        #Add a rule:
        arcpy.AddMessage('Adding rule "Must Be Single Part" to the topology')
        arcpy.AddRuleToTopology_management(topology_name,
                                           "Must Be Single Part (Line)",
                                           topology_featureclass)
        #Validate the topology:
        arcpy.AddMessage('Validating the topology')
        arcpy.ValidateTopology_management(topology_name)
        #Export the errant features to a feature class
        arcpy.AddMessage(
            'Exporting the topologically-errant feature to feature class ' +
            topology_featureclass_errors)
        arcpy.ExportTopologyErrors_management(topology_name, fdnamepath,
                                              topology_featureclass_errors)
        arcpy.AddMessage("Completed exporting topology errors")

        #Extract the values from field "OriginObjectID". This is a field generated to identify the OID's of errant features:
        ##        arcpy.AddMessage('Retrieving the object ID''s of the errant features')
        with arcpy.da.SearchCursor(topology_featureclass_errors_line,
                                   ["OriginObjectID"]) as cursor:
            for row in cursor:
                ##                arcpy.AddMessage(str(row[0]))
                badfids.add(row[0])

    #Perform at the least, the default line simplification of 0.001 meters or 0.00328084 feet
    #SimplifyLine(mergedFeatures, simplifiedFeatures, dosimplify_method, dosimplify_tolerance, "RESOLVE_ERRORS", "KEEP_COLLAPSED_POINTS", "CHECK")
    simplified_featureclass = fdnamepath + '\\_simplified_roads'
    # NOTE(review): the last three SimplifyLine_cartography arguments are
    # passed as booleans where the tool documents keyword strings (see the
    # commented call above) — confirm intended behavior.
    arcpy.SimplifyLine_cartography(topology_featureclass,
                                   simplified_featureclass, dosimplify_method,
                                   dosimplify_tolerance, False, False, False)

    arcpy.AddMessage('completed creating a workspace copy....')
    ##    arcpy.AddMessage('completed funcs.make_workspace_copy')
    return badfids, simplified_featureclass
# Example no. 6 (score: 0) — scraped example separator
def UnqualifiedTableName(path):
    """Return the bare table name from *path*, stripped of any database and
    owner qualifiers reported by arcpy.ParseTableName."""
    qualified = arcpy.ParseTableName(path)
    parts = qualified.split(",")
    return parts[2].strip()