Code example #1
import arcpy

inFC = arcpy.GetParameterAsText(0)      # Input feature class (assumed to be the tool's first parameter)
out_xls = arcpy.GetParameterAsText(1)   # Output Excel name (add .xls extension)

dissolveFields = ["AREASYMBOL", "MUSYM"]
#Dissolve Features
arcpy.Dissolve_management(inFC, "outFCDISSOLVE", dissolveFields)

#Add Field

arcpy.AddField_management(
    "outFCDISSOLVE",
    "New_MUSYM",
    "TEXT",
)

#Sort

arcpy.Sort_management("outFCDISSOLVE", "outFCDISSOLVE_SORT",
                      [["AREASYMBOL", "ASCENDING"], ["MUSYM", "ASCENDING"]])

#outa_xls = "MLRA_INTERSECT.xls"
#Table to Excel
arcpy.TableToExcel_conversion("outFCDISSOLVE_SORT", out_xls)

#dropFields = ["OBJECTID", "Shape_Area", "Shape_Length"]
#arcpy.DeleteField_management(out_xls, dropFields)

#Delete Feature Classes
arcpy.Delete_management("outFCDISSOLVE")
#arcpy.Delete_management ("outFCDISSOLVE_SORT")

print "Script Completed"
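
The commented-out DeleteField call above targets the exported .xls, which geoprocessing tools cannot edit; if the geometry fields are unwanted, one option is to drop them from the sorted feature class before running Table To Excel. A minimal sketch, assuming the script above has already produced "outFCDISSOLVE_SORT" and that out_xls is still in scope:

#Sketch: remove unwanted fields from the feature class, then export
dropFields = ["Shape_Area", "Shape_Length"]
existing = [f.name for f in arcpy.ListFields("outFCDISSOLVE_SORT")]
toDrop = [f for f in dropFields if f in existing]
if toDrop:
    arcpy.DeleteField_management("outFCDISSOLVE_SORT", toDrop)
arcpy.TableToExcel_conversion("outFCDISSOLVE_SORT", out_xls)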
Code example #2
        env.workspace = workspace
    except:
        print "Debris map workspace cannot be created. It may already exist."
        sys.exit()

    if Want_CloudRemoval != 'True':
        mask_dir = workspace + 'empty'  # keep the path itself; os.makedirs returns None
        os.makedirs(mask_dir)
    import DebrisMap
    DebrisMap.DebrisMap(workspace, data_dir, landsat, shp_dir, mask_dir,
                        A_remove, A_fill, Want_CloudRemoval)
    finddeb = arcpy.ListFeatureClasses('*MERGED*')
    debarea = workspace + finddeb[0]
    del finddeb
    if Want_CloudRemoval != 'True':
        del mask_dir
        arcpy.Delete_management(workspace + 'empty')

##-----------------------------------------  CliffProcessingSegments  ------------------------------------------------
arcpy.CalculateAreas_stats(debarea, 'debareaMeters.shp')
rows = arcpy.SearchCursor('debareaMeters.shp')
for row in rows:
    debarea_m2 = row.getValue("F_AREA")
del row, rows
arcpy.Delete_management('debareaMeters.shp')
# Trim the last folder off the workspace path, then point at a sibling
# 'CliffProcessingSegments' folder (see the path-arithmetic sketch after this snippet).
workspaceSplit = workspace.split("\\")[-2]
workspace = workspace[:-workspaceSplit.count('')]
workspace = workspace + 'CliffProcessingSegments\\'
fishnetRes = L_t  #name follows Herreid and Pellicciotti, 2018
lookDistance = n_c  #name follows Herreid and Pellicciotti, 2018
try:
    os.makedirs(workspace)
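
The path arithmetic near the end of this snippet is easy to misread: s.count('') returns len(s) + 1, so slicing with [:-workspaceSplit.count('')] strips the last folder name plus its trailing backslash. A small illustration with a hypothetical path:

workspace = "C:\\data\\glacier01\\DebrisMap\\"    # hypothetical path
last = workspace.split("\\")[-2]                  # 'DebrisMap' (the final split element is '' because of the trailing backslash)
parent = workspace[:-last.count('')]              # drops len('DebrisMap') + 1 characters -> "C:\\data\\glacier01\\"
print parent + 'CliffProcessingSegments\\'        # C:\data\glacier01\CliffProcessingSegments\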
Code example #3
                row[1] = "http://www.rideuta.com/mc/?page=Bus-BusHome-Route354"
            elif (row[0] == "455"):
                row[1] = "http://www.rideuta.com/mc/?page=Bus-BusHome-Route455"
            elif (row[0] == "473"):
                row[1] = "http://www.rideuta.com/mc/?page=Bus-BusHome-Route473"
            elif (row[0] == "9"):
                row[1] = "http://www.rideuta.com/mc/?page=Bus-BusHome-Route9"
            elif (row[0] == "6"):
                row[1] = "http://www.rideuta.com/mc/?page=Bus-BusHome-Route6"
            elif (row[0] == "902"):
                row[1] = "http://www.rideuta.com/mc/?page=Bus-BusHome-Route902"

            cursor.updateRow(row)

            #Delete spatial join
            arcpy.Delete_management(spatialJoin)
            arcpy.Delete_management(newStopsFC)

else:

    print "no new stops were added.\nUpdates are complete.\nChecking for valid URLs now...\n\n"

#Delete Temp Files
arcpy.Delete_management(clippedStops)

#Check that URL links are valid
import BusStops_CheckURL  # importing the module runs its top-level URL checks

print "done.\ncheck both route and stops tables for null values"
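
The elif chain near the top of this fragment assigns a URL that differs only by its route-number suffix. A table-free sketch of the same update is shown below; stops_fc and the field names are placeholders, not names taken from the script:

#Sketch: derive the URL from the route number instead of enumerating every route
base_url = "http://www.rideuta.com/mc/?page=Bus-BusHome-Route"
with arcpy.da.UpdateCursor(stops_fc, ["LineAbbr", "URL"]) as cursor:  # placeholder dataset and field names
    for row in cursor:
        if row[0]:
            row[1] = base_url + row[0]
            cursor.updateRow(row)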
Code example #4
File: def__SLEM.py  Project: krm75/arcGNAT
def SLEM(Line, Distance, Output, TF):

    CopyLine = arcpy.CopyFeatures_management(Line, r"in_memory\CopyLine")

    fieldnames = [f.name for f in arcpy.ListFields(CopyLine)]

    #/identification of the polyline type : raw, UGOs, sequenced UGOs, or AGOs
    k = 0
    if "Rank_AGO" in fieldnames:
        k = 3
    elif "Order_ID" in fieldnames:
        k = 2
    elif "Rank_UGO" in fieldnames:
        k = 1

    ################################
    ########## Raw polyline ########
    ################################
    if k == 0:

        #/shaping of the segmented result
        arcpy.AddField_management(CopyLine, "Rank_UGO", "LONG", "", "", "", "",
                                  "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "Rank_UGO",
                                        "!" + fieldnames[0] + "!",
                                        "PYTHON_9.3", "")
        arcpy.AddField_management(CopyLine, "From_Measure", "DOUBLE", "", "",
                                  "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "From_Measure", "0",
                                        "PYTHON_9.3", "")
        arcpy.AddField_management(CopyLine, "To_Measure", "DOUBLE", "", "", "",
                                  "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(CopyLine, "To_Measure",
                                        "!shape.length!", "PYTHON_9.3", "")

        #/conversion in routes
        LineRoutes = arcpy.CreateRoutes_lr(CopyLine, "Rank_UGO",
                                           r"in_memory\LineRoutes",
                                           "TWO_FIELDS", "From_Measure",
                                           "To_Measure")

        #/creation of the event table
        PointEventTEMP = arcpy.CreateTable_management("in_memory",
                                                      "PointEventTEMP", "", "")
        arcpy.AddField_management(PointEventTEMP, "Rank_UGO", "LONG", "", "",
                                  "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "Distance", "DOUBLE", "", "",
                                  "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.AddField_management(PointEventTEMP, "To_M", "DOUBLE", "", "", "",
                                  "", "NULLABLE", "NON_REQUIRED", "")

        UPD_SL.UpToDateShapeLengthField(LineRoutes)

        rowslines = arcpy.SearchCursor(LineRoutes)
        rowsevents = arcpy.InsertCursor(PointEventTEMP)
        for line in rowslines:
            tempdistance = float(line.Shape_Length)
            while (tempdistance > float(0)):
                row = rowsevents.newRow()
                row.Rank_UGO = line.Rank_UGO
                row.To_M = max(0, tempdistance - float(Distance))
                row.Distance = tempdistance
                rowsevents.insertRow(row)
                tempdistance = tempdistance - float(Distance)
        del rowslines
        del rowsevents

        #/creation of the route event layer
        MakeRouteEventTEMP = arcpy.MakeRouteEventLayer_lr(
            LineRoutes, "Rank_UGO", PointEventTEMP,
            "Rank_UGO LINE Distance To_M", r"in_memory\MakeRouteEventTEMP")
        Split = arcpy.CopyFeatures_management(MakeRouteEventTEMP,
                                              r"in_memory\Split", "", "0", "0",
                                              "0")
        Sort = arcpy.Sort_management(
            Split, Output,
            [["Rank_UGO", "ASCENDING"], ["Distance", "ASCENDING"]])

        arcpy.DeleteField_management(Sort, "To_M")

        #/calculation of the "Distance" field
        UPD_SL.UpToDateShapeLengthField(Sort)

        rows1 = arcpy.UpdateCursor(Sort)
        rows2 = arcpy.UpdateCursor(Sort)
        line2 = rows2.next()
        line2.Distance = 0
        rows2.updateRow(line2)
        nrows = int(str(arcpy.GetCount_management(Sort)))
        n = 0
        for line1 in rows1:
            line2 = rows2.next()
            if n == nrows - 1:
                break
            if n == 0:
                line1.Distance = 0
            if line2.Rank_UGO == line1.Rank_UGO:
                line2.Distance = line1.Distance + line1.Shape_Length
                rows2.updateRow(line2)
            if line2.Rank_UGO != line1.Rank_UGO:
                line2.Distance = 0
                rows2.updateRow(line2)

            n += 1

        #/deleting of the temporary files
        if str(TF) == "true":
            arcpy.Delete_management(Split)
            arcpy.Delete_management(CopyLine)
            arcpy.Delete_management(LineRoutes)
            arcpy.Delete_management(PointEventTEMP)

    return Sort
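
The paired rows1/rows2 cursors above implement a running start measure per Rank_UGO. A minimal sketch of the same bookkeeping, written as one read pass plus one write pass keyed by OID, is shown below; it assumes import arcpy, that rows come back in the order produced by Sort_management (Rank_UGO, then Distance), and uses the same field names as the function above:

sort_fc = str(Sort)                     # path to the sorted output
start_measure = {}                      # OID -> distance from the start of its Rank_UGO
prev_rank, running = None, 0.0
with arcpy.da.SearchCursor(sort_fc, ["OID@", "Rank_UGO", "Shape_Length"]) as cursor:
    for oid, rank, seg_len in cursor:
        if rank != prev_rank:
            running = 0.0               # new polyline: restart the running total
        start_measure[oid] = running
        running += seg_len
        prev_rank = rank
with arcpy.da.UpdateCursor(sort_fc, ["OID@", "Distance"]) as cursor:
    for row in cursor:
        row[1] = start_measure[row[0]]
        cursor.updateRow(row)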
Code example #5
File: task2-post.py  Project: swotai/la_expo_python
def NAtoCSV_trans(inSpace, inGdb, inNetworkDataset, impedanceAttribute,
                  accumulateAttributeName, inOrigins, inDestinations,
                  outNALayerName, outFile, outField):
    '''
    Same as NAtoCSV, but with "oneway" removed, since the transit network does not have this attribute.
    Also changed the SNAP code to "ions" for all stations.
    '''
    fields = outField
    import arcpy
    from arcpy import env
    try:
        #Check out the Network Analyst extension license
        if arcpy.CheckExtension("Network") == "Available":
            arcpy.CheckOutExtension("Network")
        else:
            # Warn the user that the license is unavailable
            print "Network license unavailable, make sure you have network analyst extension installed."

        #Check out the Network Analyst extension license
        arcpy.CheckOutExtension("Network")

        #Set environment settings
        env.workspace = inSpace + inGdb
        env.overwriteOutput = True

        #Create a new OD Cost matrix layer.
        outNALayer = arcpy.na.MakeODCostMatrixLayer(
            inNetworkDataset, outNALayerName, impedanceAttribute, "#", "#",
            accumulateAttributeName, "ALLOW_UTURNS", "#", "NO_HIERARCHY", "#",
            "NO_LINES", "#")

        #Get the layer object from the result object. The OD cost matrix layer can
        #now be referenced using the layer object.
        outNALayer = outNALayer.getOutput(0)

        #Get the names of all the sublayers within the OD cost matrix layer.
        subLayerNames = arcpy.na.GetNAClassNames(outNALayer)

        #Stores the layer names that we will use later
        originsLayerName = subLayerNames["Origins"]
        destinationsLayerName = subLayerNames["Destinations"]
        linesLayerName = subLayerNames["ODLines"]

        #Adjust field names
        #Exploit the fact that the detector feature is named hd_ML_snap, (or AllStationsML)
        #change the field mapping of Name to id_stn
        oriField = "Name ID_TAZ12A #"
        oriSort = "ID_TAZ12A"
        destField = "Name ID_TAZ12A #"
        destSort = "ID_TAZ12A"
        searchMetro = "BlueLine_Split SHAPE;GoldLine_split SHAPE;GreenLine_split SHAPE;LABus_prj NONE;LABus_prj_conn NONE;Metro_Tiger_Conn SHAPE;Orange_Conn SHAPE;OrangeLine_split SHAPE;RedLine_split SHAPE;Silver_Conn SHAPE;SilverLine_split SHAPE;TAZ_Tiger_Conn NONE;tl_2012_LAC_prj NONE;BusStopsWLines_prj NONE;Metro_Tiger_Conn_pt SHAPE;Orange_Conn_pt SHAPE;Silver_Conn_pt SHAPE;PreBusDPS_ND_Junctions NONE"
        searchTAZ = "BlueLine_Split NONE;GoldLine_split NONE;GreenLine_split NONE;LABus_prj NONE;LABus_prj_conn NONE;Metro_Tiger_Conn NONE;Orange_Conn NONE;OrangeLine_split NONE;RedLine_split NONE;Silver_Conn NONE;SilverLine_split NONE;TAZ_Tiger_Conn SHAPE;tl_2012_LAC_prj NONE;BusStopsWLines_prj NONE;Metro_Tiger_Conn_pt NONE;Orange_Conn_pt NONE;Silver_Conn_pt NONE;PreBusDPS_ND_Junctions NONE"
        print "Origins: ", inOrigins, " Destinations: ", inDestinations
        if "Station" in inOrigins:
            oriField = "Name id_stn #"
            oriSort = "id_stn"
            arcpy.AddLocations_na(outNALayer,
                                  originsLayerName,
                                  inOrigins,
                                  oriField,
                                  sort_field=oriSort,
                                  append="CLEAR",
                                  search_criteria=searchMetro)
            print "loaded stations onto transit network (search_criteria)"
        else:
            arcpy.AddLocations_na(outNALayer,
                                  originsLayerName,
                                  inOrigins,
                                  oriField,
                                  sort_field=oriSort,
                                  append="CLEAR",
                                  search_criteria=searchTAZ)
            print "loaded stations onto network"

        if "Station" in inDestinations:
            destField = "Name id_stn #"
            destSort = "id_stn"
            arcpy.AddLocations_na(outNALayer,
                                  destinationsLayerName,
                                  inDestinations,
                                  destField,
                                  sort_field=destSort,
                                  append="CLEAR",
                                  search_criteria=searchMetro)
            print "loaded stations onto transit network (search_criteria)"
        else:
            arcpy.AddLocations_na(outNALayer,
                                  destinationsLayerName,
                                  inDestinations,
                                  destField,
                                  sort_field=destSort,
                                  append="CLEAR",
                                  search_criteria=searchTAZ)
            print "loaded stations onto network"

        #Solve the OD cost matrix layer
        print "Begin Solving"
        arcpy.na.Solve(outNALayer)
        print "Done Solving"

        # Extract lines layer, export to CSV
        for lyr in arcpy.mapping.ListLayers(outNALayer):
            if lyr.name == linesLayerName:
                with open(outFile, 'w') as f:
                    #f.write(','.join(fields)+'\n') # csv headers
                    with arcpy.da.SearchCursor(lyr, fields) as cursor:
                        print "Successfully created lines searchCursor.  Exporting to " + outFile
                        for row in cursor:
                            f.write(','.join([str(r) for r in row]) + '\n')

        # Deleteing using del outNALayer is not enough.  Need to delete within arcpy to release
        arcpy.Delete_management(outNALayer)

    except Exception as e:
        # If an error occurred, print line number and error message
        import sys
        tb = sys.exc_info()[2]
        print "An error occurred in NAtoCSV_Trans line %i" % tb.tb_lineno
        print str(e)

    finally:
        #Check the network analyst extension license back in, regardless of errors.
        arcpy.CheckInExtension("Network")
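
A hypothetical invocation of the function is shown below; every path, dataset name, and field in it is a placeholder rather than something taken from the project:

NAtoCSV_trans(inSpace="C:\\project\\",                     # placeholder folder
              inGdb="Transit.gdb",
              inNetworkDataset="TransitNetwork\\Transit_ND",
              impedanceAttribute="TransitTime",
              accumulateAttributeName=["Length"],
              inOrigins="AllStationsML",
              inDestinations="TAZ_centroids",
              outNALayerName="StationToTAZ",
              outFile="C:\\project\\station_to_taz.csv",
              outField=["OriginID", "DestinationID", "Total_TransitTime"])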
Code example #6
import arcpy
arcpy.env.overwriteOutput = True
folderforgdb = arcpy.GetParameterAsText(0)
datapoints = arcpy.GetParameterAsText(1)
clipzon = arcpy.GetParameterAsText(2)
nameGDB = arcpy.GetParameterAsText(3)
#folderforgdb = r"E:\programin\semestr2\samrob\s12_GIS_FILE"
#datapoints = r"E:\programin\semestr2\samrob\s12_GIS_FILE\Programming_in_GIS_2020_L7_s12\OSMpoints.shp"
#clipzon = r"E:\programin\semestr2\samrob\s12_GIS_FILE\Programming_in_GIS_2020_L7_s12\CentralAmerica.shp"
#nameGDB = 'salvador'
arcpy.CreateFileGDB_management(folderforgdb, nameGDB + '.gdb')
arcpy.AddMessage('Created new File GDB: {}.gdb'.format(nameGDB))
arcpy.env.workspace = folderforgdb + "\\" + nameGDB + '.gdb'
amenities = ['school', 'hospital', 'place_of_worship']
country = arcpy.GetParameterAsText(4)
arcpy.MakeFeatureLayer_management(clipzon, 'zoneclip', '"NAME" = ' + "'"+country + "'")
arcpy.Clip_analysis(datapoints, 'zoneclip', 'clipshp')
arcpy.AddMessage('Objects are cut for a given area ({})'.format(country))
for i in amenities:
    arcpy.MakeFeatureLayer_management('clipshp', 'clip', '"amenity" = ' + "'" + i + "'")
    arcpy.CopyFeatures_management('clip', 'zones_' + i)
    arcpy.AddField_management('zones_' + i, 'source', 'TEXT')
    arcpy.AddField_management('zones_' + i, 'GID', 'DOUBLE')
    with arcpy.da.UpdateCursor('zones_' + i, ['source', 'GID', 'id']) as cursor:
        for row in cursor:
            row[1] = row[2]
            row[0] = "OpenStreetMap"
            cursor.updateRow(row)
    arcpy.AddMessage('Created file for location '+i)
arcpy.Delete_management('clipshp')
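
The where clause above is assembled by hand-quoting the field and country name; arcpy.AddFieldDelimiters can build the field part so the same expression works for both shapefile and geodatabase sources. A sketch of that variant:

#Sketch: let arcpy supply the correct field delimiters for the data source
where = "{0} = '{1}'".format(arcpy.AddFieldDelimiters(clipzon, "NAME"), country)
arcpy.MakeFeatureLayer_management(clipzon, 'zoneclip', where)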
Code example #7
def ProcessRoutine(ArgVariables):
    """Main Function that operates the logic of the script."""
    try:

        arcpy.AddMessage("\nInputData: " + InputData)
        arcpy.AddMessage("WorkingDir: " + WorkingDir)
        arcpy.AddMessage("CreateStandAloneXML: " + CreateStandAloneXML)
        arcpy.AddMessage("UseStartTemplate: " + UseStartTemplate)
        arcpy.AddMessage("StarterTemplate: " + CustomStarterTemplate)

        myDataType, myFeatType = Get_Data_Type(
        )  #Determine data type, and feature type if applicable
        arcpy.AddMessage("Data type being evaluated: " + myDataType)
        arcpy.AddMessage("Feature type being evaluated: " + myFeatType + "\n")

        SourceFile = os.path.split(os.path.splitext(InputData)[0])[
            1]  #The name of the input file. No extension. No full path.
        OriginalMDRecord = os.path.join(
            WorkingDir, SourceFile +
            "_Original.xml")  #File pointer to unmodified original.
        FGDCXML = os.path.join(
            WorkingDir, SourceFile +
            "_FGDC.xml")  #File pointer to the copy we will modify/update.

        #Create and keep 'Original' metadata copy in working directory.
        try:
            MDTools.CreateCopyMDRecord(InputData, OriginalMDRecord)
        except:
            pass

        #After we made a copy of the input's original MD, start process from custom template if it is toggled.
        if str(UseStartTemplate) == "true":
            try:
                arcpy.MetadataImporter_conversion(
                    CustomStarterTemplate, InputData
                )  # This imports only: does not convert and does not sync
                arcpy.AddMessage(
                    "The user's custom starter record is now being imported into the input data set...\n"
                )
            except:
                arcpy.AddWarning("!!!!!!!")
                arcpy.AddWarning(
                    "There was a problem importing from the Custom Starter Template. Please ensure that the file is here: ("
                    + CustomStarterTemplate + ")")
                arcpy.AddWarning("!!!!!!!\n")
                sys.exit(1)

        try:  #Extract any existing metadata, and translate to FGDC format if necessary.
            ExportFGDC_MD_Utility.GetMDContent(
                InputData, FGDCXML, WorkingDir
            )  #Export (translate if necessary) input metadata to FGDC format. Remove ESRI 'sync' & 'reminder' elements.
        except:
            arcpy.AddMessage(
                "No metadata could be found for this record. A new file will be created.\n"
            )
            MDTools.CreateCopyMDRecord(GenericTemplate, FGDCXML)

        MDTools.RemoveNameSpace(
            FGDCXML
        )  #Eliminate namespace tags from root element in xml if present (appear when tool is run on spatial data sets).
        MDTools.CheckMasterNodes(
            FGDCXML
        )  #Ensure all the key FGDC-CSDGM nodes are present in the record.


        if not InputIsXML and not InputIsCSV \
                and not InputIsExcel and not InputIsGDB \
                and desc.DatasetType != "Table": #Only attempt to extract/update spatial properties from spatial data sets.

            try:
                GCS_ExtentList = Get_LatLon_BndBox()[1]
            except:
                arcpy.AddWarning("!!!!!!!")
                arcpy.AddWarning(
                    "A problem was encountered when attempting to retrieve the spatial extent of the input data set. Please review the tool documentation and ensure the data set is a valid input and ENSURE THAT A COORDINATE SYSTEM HAS BEEN DEFINED."
                )
                arcpy.AddWarning("!!!!!!!\n")
                sys.exit()

            #Get/Update Bounding Coordinates
            GCS_ExtentList = Get_LatLon_BndBox()[1]
            Local_ExtentList = Get_LatLon_BndBox()[0]
            if "nan" in str(Local_ExtentList):
                arcpy.AddWarning(
                    "No spatial extent could be found for the input spatial data set. Please review the 'Bounding Extent' in the final metadata record. (Values will be set to maximum global extent).\n"
                )
            arcpy.AddMessage("Bounding Coordinates (Local): " +
                             str(Local_ExtentList))
            arcpy.AddMessage("Bounding Coordinates (Geographic): " +
                             str(GCS_ExtentList) + "\n")

            WestBC = Get_LatLon_BndBox()[1][0]
            EastBC = Get_LatLon_BndBox()[1][2]
            NorthBC = Get_LatLon_BndBox()[1][3]
            SouthBC = Get_LatLon_BndBox()[1][1]
            MDTools.WriteBoundingInfo(FGDCXML, WestBC, EastBC, NorthBC,
                                      SouthBC)

            #Get/Update Spatial Data Organization
            SpatialDataOrgInfo = Get_Spatial_Data_OrgInfo(
                InputData, myDataType, myFeatType)
            MDTools.WriteSpatialDataOrgInfo(FGDCXML, SpatialDataOrgInfo)

            #Get/Update Spatial Reference Information
            SpatialReferenceInfo = SpatialRefTools.SpatialRefInfo(
                GCS_PrjFile, InputData, WorkingDir, GCS_ExtentList)
            MDTools.WriteSpatialRefInfo(FGDCXML, SpatialReferenceInfo)
            #Handle vertical coordinate system?

        #Get/Update Geospatial Presentation Form. Also updates Format Name (within Distribution Info).
        #(Skip this step and leave existing content if tool input is XML).
        if not InputIsXML:
            MDTools.WriteGeospatialForm(FGDCXML, myDataType, myFeatType)

        #Get/Update Native Environment Details
        #This will be used as a switch to determine which .exe for the EA builder needs to be run (for either 10.0, 10.1, or 10.2).
        #The version info is also written out to the XML record in the 'Native Environment' section.
        ESRIVersion = GetESRIVersion_WriteNativeEnv(FGDCXML)

        #Get/Update Metadata Date of Editing
        Now = datetime.datetime.now()
        MDDate = Now.strftime("%Y%m%d")
        MDTools.WriteMDDate(FGDCXML, MDDate)

        #Update Entity/Attribute Section
        if InputIsCSV or InputIsExcel:
            contents_fname = InputData
        elif not InputIsXML and not InputIsGDB:
            data_contents = introspector.introspect_dataset(InputData)
            input_fname = os.path.split(InputData)[1]
            contents_fname = os.path.join(WorkingDir, input_fname + ".p")
            pickle.dump(data_contents, open(contents_fname, "wb"))
        else:
            contents_fname = ''

        #Rerun FGDC Translator tool to handle newly-added elements that are out of order in XML tree.
        MDTools.ReRunFGDCTranslator(FGDCXML)

        #Re-import new metadata to the data set to capture E/A tool changes. If input file is a stand alone .xml this step is skipped
        if not InputIsXML:
            try:
                arcpy.MetadataImporter_conversion(
                    FGDCXML, InputData
                )  # This imports only: does not convert and does not sync
            except BaseException as e:
                arcpy.AddWarning("There was a problem during the metadata"
                                 " importation process.\n{}".format(str(e)))

        #Open up Metadata Editor and allow user to review/update
        outXML = os.path.splitext(FGDCXML)[0] + "temp.xml"
        #Arg = '"' + MetadataEditor + '"' + " " + '"' + FGDCXML + '"' + " " + '"' + outXML + '"' + " " + '"' + Browser + '"' #Start and end quotes are necessary to handle spaces in file names and IE Path when passing to Command Prompt.
        #Arg = '"' + MetadataEditor + '"' + " " + '"' + FGDCXML + '"' + " " + '"' + outXML + '"' + " "
        Arg = '"%s" "%s" "%s"' % (python_exe, mdwiz_py_fname, FGDCXML)
        if contents_fname:
            Arg += ' "{}"'.format(contents_fname)
        arcpy.AddWarning(Arg)
        arcpy.AddMessage("*************************")
        arcpy.AddMessage(
            "\nPLEASE UPDATE/REVIEW THE METADATA INFO IN THE POP-UP WINDOW.")
        arcpy.AddMessage("(Allow a moment for the window to open).\n")
        arcpy.AddMessage("*************************")
        try:
            winsound.PlaySound(
                r"C:\Windows\Media\Cityscape\Windows Exclamation.wav",
                winsound.SND_FILENAME)
        except:
            pass

        p = subprocess.Popen(Arg,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        output, error = p.communicate()

        if output:
            arcpy.AddMessage("MetadataWizard output:\n  {}".format(output))
        if error:
            arcpy.AddWarning(sys.executable)
            arcpy.AddWarning("An error was encountered opening "
                             "the MetadataWizard application:\n")
            arcpy.AddWarning("Error: {}".format(error.strip()))
            sys.exit(1)

        p.wait()

        try:
            MDTools.RemoveStyleSheet(
                FGDCXML
            )  #MP actually removes the stylesheet in VB.NET app... this is a redundancy here.
            # MDTools.ReplaceXML(FGDCXML, outXML)
        except:
            arcpy.AddWarning(
                "No content was saved in the Metadata Editor window. The metadata record was not updated.\n"
            )

        #Re-import new metadata to the data set to capture user edits from the Metadata Editor window.
        try:
            arcpy.MetadataImporter_conversion(
                FGDCXML, InputData
            )  # This imports only: does not convert and does not sync
            arcpy.AddMessage(
                "The updated metadata record is now being re-imported into the input data set...\n"
            )
        except:
            arcpy.AddMessage(
                "There was a problem during the metadata importation process!")

        #Remove the Error Report file generated by MP from the Main Metadata Editor.
        MP_ErrorReport = os.path.splitext(
            FGDCXML)[0] + "temp_MP_ErrorReport.xml"
        try:
            os.remove(MP_ErrorReport)
        except:
            pass

        #Remove FGDC XML file if the toggle to preserve 'stand-alone' file is configured to FALSE. This appears to be passed as a string rather than boolean.
        if str(CreateStandAloneXML) == "false":
            try:
                arcpy.Delete_management(FGDCXML)
                arcpy.AddMessage(
                    "The Wizard will now remove the stand-alone FGDC XML, as requested in the tool interface...\n"
                )
            except:
                arcpy.AddMessage(
                    "There was a problem removing the stand-alone XML file. Try removing the file (%s) manually from the working directory.\n"
                    % FGDCXML)

        #Remove the 'ArcpyTranslate.xml' temp file that gets created when exporting from ESRI metadata to FGDC.
        try:
            os.remove(os.path.join(WorkingDir, 'ArcpyTranslate.xml'))
        except:
            pass

    except arcpy.ExecuteError:
        arcpyError()
    except:
        pythonError()
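
The MetadataWizard command line above is assembled as a single quoted string; passing Popen a list avoids the manual quoting of paths with spaces. A sketch using the same variables as the function above:

#Sketch: list-form arguments, no hand quoting required
cmd = [python_exe, mdwiz_py_fname, FGDCXML]
if contents_fname:
    cmd.append(contents_fname)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()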
Code example #8
	
    for Year in YearsList:

        if Year not in YrList:
            arcpy.AddMessage("Invalid Year...%s" % Year)
            sys.exit(0)

        cSize = 30

        for yr in list56:
            if yr == Year:
                cSize = 56

        env.workspace = FileGDB
        env.scratchWorkspace = env.scratchFolder

        # Get data
        outTemp = getNASS(Year, bBox, cSize)

        Rslt = arcpy.GetRasterProperties_management(outTemp, "UNIQUEVALUECOUNT")

        if Rslt.getOutput(0) != '1':
            arcpy.AddMessage("Classes: %s" % Rslt.getOutput(0))
            PrjExtRaster(outTemp, theSR, cSize, fcBuf, Year, ACPFlkup)
        else:
            arcpy.AddMessage("No valid NASS data for %s" % Year)
            arcpy.Delete_management(outTemp)
Code example #9
    # Process: Make Feature Layer
    rd_shapefile = rd_shapefile.replace('"', '')
    dp_shapefile = dp_shapefile.replace('"', '')
    roadlines_layer = 'RoadLines'
    drainpoints_layer = 'DrainPoints'

    # get the current map document
    mxd = arcpy.mapping.MapDocument("CURRENT")
    df = arcpy.mapping.ListDataFrames(mxd, "Layers")[0]

    # remove if the shape file layers currently in the map document
    for lyr in arcpy.mapping.ListLayers(mxd, "", df):
        if lyr.name.lower() == roadlines_layer.lower() or lyr.name.lower(
        ) == drainpoints_layer.lower():
            # delete layer object from memory
            arcpy.Delete_management(lyr.name)

    # create feature layer from the roadlines shape file
    arcpy.MakeFeatureLayer_management(rd_shapefile, roadlines_layer)

    # create feature layer from the drainpoints shape file
    arcpy.MakeFeatureLayer_management(dp_shapefile, drainpoints_layer)

    addLayer_roadLines = arcpy.mapping.Layer(roadlines_layer)
    addLayer_drainPoints = arcpy.mapping.Layer(drainpoints_layer)

    graip_db_file = graip_db_file.replace('"', '')
    graip_roadlines_table = os.path.join(graip_db_file, "RoadLines")
    graip_drainpoints_table = os.path.join(graip_db_file, "DrainPoints")

    # join the roadlines layer to the roadlines db table
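
The snippet ends mid-comment; a join of the kind the comment describes would look roughly like the sketch below. The key field names are assumptions, not values taken from the GRAIP database:

#Sketch: join each feature layer to its table in the GRAIP database (field names assumed)
arcpy.AddJoin_management(roadlines_layer, "GRAIPRID", graip_roadlines_table, "GRAIPRID")
arcpy.AddJoin_management(drainpoints_layer, "GRAIPDID", graip_drainpoints_table, "GRAIPDID")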
Code example #10
def combFIS(in_network, model_run, scratch, max_DA_thresh):
    arcpy.env.overwriteOutput = True

    # get list of all fields in the flowline network
    fields = [f.name for f in arcpy.ListFields(in_network)]

    # set the carrying capacity and vegetation field depending on whether potential or existing run
    if model_run == 'pt':
        out_field = "oCC_PT"
        veg_field = "oVC_PT"
        mcc_field = "mCC_PT_Ct"
    else:
        out_field = "oCC_EX"
        veg_field = "oVC_EX"
        mcc_field = "mCC_EX_Ct"

    # check for oCC_* field in the network attribute table and delete if exists
    if out_field in fields:
        arcpy.DeleteField_management(in_network, out_field)

    # get arrays for fields of interest
    segid_np = arcpy.da.FeatureClassToNumPyArray(in_network, "ReachID")
    ovc_np = arcpy.da.FeatureClassToNumPyArray(in_network, veg_field)
    ihydsp2_np = arcpy.da.FeatureClassToNumPyArray(in_network, "iHyd_SP2")
    ihydsplow_np = arcpy.da.FeatureClassToNumPyArray(in_network, "iHyd_SPLow")
    igeoslope_np = arcpy.da.FeatureClassToNumPyArray(in_network, "iGeo_Slope")

    segid_array = np.asarray(segid_np, np.int64)
    ovc_array = np.asarray(ovc_np, np.float64)
    ihydsp2_array = np.asarray(ihydsp2_np, np.float64)
    ihydsplow_array = np.asarray(ihydsplow_np, np.float64)
    igeoslope_array = np.asarray(igeoslope_np, np.float64)

    # check that inputs are within range of fis
    # if not, re-assign the value to just within range
    ovc_array[ovc_array < 0] = 0
    ovc_array[ovc_array > 45] = 45
    ihydsp2_array[ihydsp2_array < 0] = 0.0001
    ihydsp2_array[ihydsp2_array > 10000] = 10000
    ihydsplow_array[ihydsplow_array < 0] = 0.0001
    ihydsplow_array[ihydsplow_array > 10000] = 10000
    igeoslope_array[igeoslope_array > 1] = 1

    # free the temporary NumPy record arrays
    del segid_np, ovc_np, ihydsp2_np, ihydsplow_np, igeoslope_np

    # create antecedent (input) and consequent (output) objects to hold universe variables and membership functions
    ovc = ctrl.Antecedent(np.arange(0, 45, 0.01), 'input1')
    sp2 = ctrl.Antecedent(np.arange(0, 10000, 1), 'input2')
    splow = ctrl.Antecedent(np.arange(0, 10000, 1), 'input3')
    slope = ctrl.Antecedent(np.arange(0, 1, 0.0001), 'input4')
    density = ctrl.Consequent(np.arange(0, 45, 0.01), 'result')

    # build membership functions for each antecedent and consequent object
    ovc['none'] = fuzz.trimf(ovc.universe, [0, 0, 0.1])
    ovc['rare'] = fuzz.trapmf(ovc.universe, [0, 0.1, 0.5, 1.5])
    ovc['occasional'] = fuzz.trapmf(ovc.universe, [0.5, 1.5, 4, 8])
    ovc['frequent'] = fuzz.trapmf(ovc.universe, [4, 8, 12, 25])
    ovc['pervasive'] = fuzz.trapmf(ovc.universe, [12, 25, 45, 45])

    sp2['persists'] = fuzz.trapmf(sp2.universe, [0, 0, 1000, 1200])
    sp2['breach'] = fuzz.trimf(sp2.universe, [1000, 1200, 1600])
    sp2['oblowout'] = fuzz.trimf(sp2.universe, [1200, 1600, 2400])
    sp2['blowout'] = fuzz.trapmf(sp2.universe, [1600, 2400, 10000, 10000])

    splow['can'] = fuzz.trapmf(splow.universe, [0, 0, 150, 175])
    splow['probably'] = fuzz.trapmf(splow.universe, [150, 175, 180, 190])
    splow['cannot'] = fuzz.trapmf(splow.universe, [180, 190, 10000, 10000])

    slope['flat'] = fuzz.trapmf(slope.universe, [0, 0, 0.0002, 0.005])
    slope['can'] = fuzz.trapmf(slope.universe, [0.0002, 0.005, 0.12, 0.15])
    slope['probably'] = fuzz.trapmf(slope.universe, [0.12, 0.15, 0.17, 0.23])
    slope['cannot'] = fuzz.trapmf(slope.universe, [0.17, 0.23, 1, 1])

    density['none'] = fuzz.trimf(density.universe, [0, 0, 0.1])
    density['rare'] = fuzz.trapmf(density.universe, [0, 0.1, 0.5, 1.5])
    density['occasional'] = fuzz.trapmf(density.universe, [0.5, 1.5, 4, 8])
    density['frequent'] = fuzz.trapmf(density.universe, [4, 8, 12, 25])
    density['pervasive'] = fuzz.trapmf(density.universe, [12, 25, 45, 45])

    # build fis rule table
    rule1 = ctrl.Rule(ovc['none'], density['none'])
    rule2 = ctrl.Rule(splow['cannot'], density['none'])
    rule3 = ctrl.Rule(slope['cannot'], density['none'])
    rule4 = ctrl.Rule(ovc['rare'] & sp2['persists'] & splow['can'] & ~slope['cannot'], density['rare'])
    rule5 = ctrl.Rule(ovc['rare'] & sp2['persists'] & splow['probably'] & ~slope['cannot'], density['rare'])
    rule6 = ctrl.Rule(ovc['rare'] & sp2['breach'] & splow['can'] & ~slope['cannot'], density['rare'])
    rule7 = ctrl.Rule(ovc['rare'] & sp2['breach'] & splow['probably'] & ~slope['cannot'], density['rare'])
    rule8 = ctrl.Rule(ovc['rare'] & sp2['oblowout'] & splow['can'] & ~slope['cannot'], density['rare'])
    rule9 = ctrl.Rule(ovc['rare'] & sp2['oblowout'] & splow['probably'] & ~slope['cannot'], density['rare'])
    rule10 = ctrl.Rule(ovc['rare'] & sp2['blowout'] & splow['can'] & ~slope['cannot'], density['none'])
    rule11 = ctrl.Rule(ovc['rare'] & sp2['blowout'] & splow['probably'] & ~slope['cannot'], density['none'])
    rule12 = ctrl.Rule(ovc['occasional'] & sp2['persists'] & splow['can'] & ~slope['cannot'], density['occasional'])
    rule13 = ctrl.Rule(ovc['occasional'] & sp2['persists'] & splow['probably'] & ~slope['cannot'], density['occasional'])
    rule14 = ctrl.Rule(ovc['occasional'] & sp2['breach'] & splow['can'] & ~slope['cannot'], density['occasional'])
    rule15 = ctrl.Rule(ovc['occasional'] & sp2['breach'] & splow['probably'] & ~slope['cannot'], density['occasional'])
    rule16 = ctrl.Rule(ovc['occasional'] & sp2['oblowout'] & splow['can'] & ~slope['cannot'], density['occasional'])
    rule17 = ctrl.Rule(ovc['occasional'] & sp2['oblowout'] & splow['probably'] & ~slope['cannot'], density['occasional'])
    rule18 = ctrl.Rule(ovc['occasional'] & sp2['blowout'] & splow['can'] & ~slope['cannot'], density['rare'])
    rule19 = ctrl.Rule(ovc['occasional'] & sp2['blowout'] & splow['probably'] & ~slope['cannot'], density['rare'])
    rule20 = ctrl.Rule(ovc['frequent'] & sp2['persists'] & splow['can'] & slope['flat'], density['occasional'])
    rule21 = ctrl.Rule(ovc['frequent'] & sp2['persists'] & splow['can'] & slope['can'], density['frequent'])
    rule22 = ctrl.Rule(ovc['frequent'] & sp2['persists'] & splow['can'] & slope['probably'], density['occasional'])
    rule23 = ctrl.Rule(ovc['frequent'] & sp2['persists'] & splow['probably'] & slope['flat'], density['occasional'])
    rule24 = ctrl.Rule(ovc['frequent'] & sp2['persists'] & splow['probably'] & slope['can'], density['frequent'])
    rule25 = ctrl.Rule(ovc['frequent'] & sp2['persists'] & splow['probably'] & slope['probably'], density['occasional'])
    rule26 = ctrl.Rule(ovc['frequent'] & sp2['breach'] & splow['can'] & slope['flat'], density['occasional'])
    rule27 = ctrl.Rule(ovc['frequent'] & sp2['breach'] & splow['can'] & slope['can'], density['frequent'])
    rule28 = ctrl.Rule(ovc['frequent'] & sp2['breach'] & splow['can'] & slope['probably'], density['occasional'])
    rule29 = ctrl.Rule(ovc['frequent'] & sp2['breach'] & splow['probably'] & slope['flat'], density['occasional'])
    rule30 = ctrl.Rule(ovc['frequent'] & sp2['breach'] & splow['probably'] & slope['can'], density['frequent'])
    rule31 = ctrl.Rule(ovc['frequent'] & sp2['breach'] & splow['probably'] & slope['probably'], density['occasional'])
    rule32 = ctrl.Rule(ovc['frequent'] & sp2['oblowout'] & splow['can'] & slope['flat'], density['occasional'])
    rule33 = ctrl.Rule(ovc['frequent'] & sp2['oblowout'] & splow['can'] & slope['can'], density['frequent'])
    rule34 = ctrl.Rule(ovc['frequent'] & sp2['oblowout'] & splow['can'] & slope['probably'], density['occasional'])
    rule35 = ctrl.Rule(ovc['frequent'] & sp2['oblowout'] & splow['probably'] & slope['flat'], density['rare'])
    rule36 = ctrl.Rule(ovc['frequent'] & sp2['oblowout'] & splow['probably'] & slope['can'], density['occasional'])
    rule37 = ctrl.Rule(ovc['frequent'] & sp2['oblowout'] & splow['probably'] & slope['probably'], density['rare'])
    rule38 = ctrl.Rule(ovc['frequent'] & sp2['blowout'] & splow['can'] & slope['flat'], density['rare'])
    rule39 = ctrl.Rule(ovc['frequent'] & sp2['blowout'] & splow['can'] & slope['can'], density['rare'])
    rule40 = ctrl.Rule(ovc['frequent'] & sp2['blowout'] & splow['can'] & slope['probably'], density['rare'])
    rule41 = ctrl.Rule(ovc['frequent'] & sp2['blowout'] & splow['probably'] & slope['flat'], density['rare'])
    rule42 = ctrl.Rule(ovc['frequent'] & sp2['blowout'] & splow['probably'] & slope['can'], density['rare'])
    rule43 = ctrl.Rule(ovc['frequent'] & sp2['blowout'] & splow['probably'] & slope['probably'], density['rare'])
    rule44 = ctrl.Rule(ovc['pervasive'] & sp2['persists'] & splow['can'] & slope['flat'], density['frequent'])
    rule45 = ctrl.Rule(ovc['pervasive'] & sp2['persists'] & splow['can'] & slope['can'], density['pervasive'])
    rule46 = ctrl.Rule(ovc['pervasive'] & sp2['persists'] & splow['can'] & slope['probably'], density['frequent'])
    rule47 = ctrl.Rule(ovc['pervasive'] & sp2['persists'] & splow['probably'] & slope['flat'], density['frequent'])
    rule48 = ctrl.Rule(ovc['pervasive'] & sp2['persists'] & splow['probably'] & slope['can'], density['pervasive'])
    rule49 = ctrl.Rule(ovc['pervasive'] & sp2['persists'] & splow['probably'] & slope['probably'], density['frequent'])
    rule50 = ctrl.Rule(ovc['pervasive'] & sp2['breach'] & splow['can'] & slope['flat'], density['frequent'])
    rule51 = ctrl.Rule(ovc['pervasive'] & sp2['breach'] & splow['can'] & slope['can'], density['pervasive'])
    rule52 = ctrl.Rule(ovc['pervasive'] & sp2['breach'] & splow['can'] & slope['probably'], density['frequent'])
    rule53 = ctrl.Rule(ovc['pervasive'] & sp2['breach'] & splow['probably'] & slope['flat'], density['frequent'])
    rule54 = ctrl.Rule(ovc['pervasive'] & sp2['breach'] & splow['probably'] & slope['can'], density['pervasive'])
    rule55 = ctrl.Rule(ovc['pervasive'] & sp2['breach'] & splow['probably'] & slope['probably'], density['frequent'])
    rule56 = ctrl.Rule(ovc['pervasive'] & sp2['oblowout'] & splow['can'] & slope['flat'], density['frequent'])
    rule57 = ctrl.Rule(ovc['pervasive'] & sp2['oblowout'] & splow['can'] & slope['can'], density['pervasive'])
    rule58 = ctrl.Rule(ovc['pervasive'] & sp2['oblowout'] & splow['can'] & slope['probably'], density['frequent'])
    rule59 = ctrl.Rule(ovc['pervasive'] & sp2['oblowout'] & splow['probably'] & slope['flat'], density['occasional'])
    rule60 = ctrl.Rule(ovc['pervasive'] & sp2['oblowout'] & splow['probably'] & slope['can'], density['frequent'])
    rule61 = ctrl.Rule(ovc['pervasive'] & sp2['oblowout'] & splow['probably'] & slope['probably'], density['occasional'])
    rule62 = ctrl.Rule(ovc['pervasive'] & sp2['blowout'] & splow['can'] & slope['flat'], density['occasional'])
    rule63 = ctrl.Rule(ovc['pervasive'] & sp2['blowout'] & splow['can'] & slope['can'], density['occasional'])
    rule64 = ctrl.Rule(ovc['pervasive'] & sp2['blowout'] & splow['can'] & slope['probably'], density['rare'])
    rule65 = ctrl.Rule(ovc['pervasive'] & sp2['blowout'] & splow['probably'] & slope['flat'], density['occasional'])
    rule66 = ctrl.Rule(ovc['pervasive'] & sp2['blowout'] & splow['probably'] & slope['can'], density['occasional'])
    rule67 = ctrl.Rule(ovc['pervasive'] & sp2['blowout'] & splow['probably'] & slope['probably'], density['rare'])

    comb_ctrl = ctrl.ControlSystem([rule1, rule2, rule3, rule4, rule5, rule6, rule7, rule8, rule9, rule10, rule11, rule12,
                                    rule13, rule14, rule15, rule16, rule17, rule18, rule19, rule20, rule21, rule22, rule23,
                                    rule24, rule25, rule26, rule27, rule28, rule29, rule30, rule31, rule32, rule33, rule34,
                                    rule35, rule36, rule37, rule38, rule39, rule40, rule41, rule42, rule43, rule44, rule45,
                                    rule46, rule47, rule48, rule49, rule50, rule51, rule52, rule53, rule54, rule55, rule56,
                                    rule57, rule58, rule59, rule60, rule61, rule62, rule63, rule64, rule65, rule66, rule67])
    comb_fis = ctrl.ControlSystemSimulation(comb_ctrl)

    # run fuzzy inference system on inputs and defuzzify output
    out = np.zeros(len(ovc_array)) # todo: test this using nas instead of zeros
    for i in range(len(out)):
        comb_fis.input['input1'] = ovc_array[i]
        comb_fis.input['input2'] = ihydsp2_array[i]
        comb_fis.input['input3'] = ihydsplow_array[i]
        comb_fis.input['input4'] = igeoslope_array[i]
        comb_fis.compute()
        out[i] = comb_fis.output['result']

    # save fuzzy inference system output as table
    columns = np.column_stack((segid_array, out))
    out_table = os.path.dirname(in_network) + "/" + out_field + "_Table.txt"  # todo: see if possible to skip this step
    np.savetxt(out_table, columns, delimiter = ",", header = "ReachID, " + out_field, comments = "")
    occ_table = scratch + "/" + out_field + "Tbl"
    arcpy.CopyRows_management(out_table, occ_table)

    # join the fuzzy inference system output to the flowline network
    # create empty dictionary to hold input table field values
    tblDict = {}
    # add values to dictionary
    with arcpy.da.SearchCursor(occ_table, ['ReachID', out_field]) as cursor:
        for row in cursor:
            tblDict[row[0]] = row[1]
    # populate flowline network out field
    arcpy.AddField_management(in_network, out_field, 'DOUBLE')
    with arcpy.da.UpdateCursor(in_network, ['ReachID', out_field]) as cursor:
        for row in cursor:
            try:
                aKey = row[0]
                row[1] = tblDict[aKey]
                cursor.updateRow(row)
            except:
                pass
    tblDict.clear()

    # calculate defuzzified centroid value for density 'none' MF group
    # this will be used to re-classify output values that fall in this group
    # important: will need to update the array (x) and MF values (mfx) if the
    #            density 'none' values are changed in the model
    x = np.arange(0, 45, 0.01)
    mfx = fuzz.trimf(x, [0, 0, 0.1])
    defuzz_centroid = round(fuzz.defuzz(x, mfx, 'centroid'), 6)

    # update combined capacity (occ_*) values in stream network
    # correct for occ_* greater than ovc_* as vegetation is most limiting factor in model
    # (i.e., combined fis value should not be greater than the vegetation capacity)
    # set occ_* to 0 if the drainage area is greater than the user defined threshold
    # this enforces a stream size threshold above which beaver dams won't persist and/or won't be built
    # set occ_* to 0 if output falls fully in 'none' category

    with arcpy.da.UpdateCursor(in_network, [out_field, veg_field, 'iGeo_DA', 'iGeo_Slope']) as cursor:
        for row in cursor:
            if row[0] > row[1]:
                row[0] = row[1]
            if row[2] >= float(max_DA_thresh):
                row[0] = 0.0
            if round(row[0], 6) == defuzz_centroid:
                row[0] = 0.0
            cursor.updateRow(row)

    # delete temporary tables and arrays
    arcpy.Delete_management(out_table)
    arcpy.Delete_management(occ_table)
    del columns, out, x, mfx, defuzz_centroid

    # calculate dam count (mCC_**_Ct) for each reach as density * reach length
    arcpy.AddField_management(in_network, mcc_field, 'DOUBLE')
    with arcpy.da.UpdateCursor(in_network, [mcc_field, out_field, 'iGeo_Len']) as cursor:
        for row in cursor:
            len_km = row[2] * 0.001
            row[0] = row[1] * len_km
            cursor.updateRow(row)
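
combFIS relies on several modules that are imported elsewhere in the project; the aliases below are inferred from how they are used in the function (np, fuzz, ctrl), and the call at the end is purely hypothetical:

import os
import numpy as np
import arcpy
import skfuzzy as fuzz
from skfuzzy import control as ctrl

#Hypothetical call; the paths and drainage-area threshold are placeholders, and the
#network is expected to already carry the oVC_*, iHyd_* and iGeo_* fields used above
combFIS(in_network="C:\\BRAT\\run01\\outputs\\brat_network.shp",
        model_run='ex',
        scratch="C:\\BRAT\\run01\\scratch.gdb",
        max_DA_thresh=4600)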
Code example #11
    elif ("symbol_id" in fieldNameList):
        symbolNameFieldName = "Symbol_ID"
    elif ("symbolrule" in fieldNameList):
        symbolNameFieldName = "symbolrule"
    updatefields.append(symbolNameFieldName)

    if (EchelonField in fieldNameList):
        updatefields.append(EchelonField)

    for field in desc.Fields:
        if field.name in updatefields:
            # Get domain if any
            if (field.domain is not None and field.domain != ""):
                fieldNameToDomainName[field.name] = field.domain
                if arcpy.Exists("in_memory/" + field.domain):
                    arcpy.Delete_management("in_memory/" + field.domain)
                try:
                    #If path is feature class
                    arcpy.DomainToTable_management(desc.path, field.domain,
                                                   "in_memory/" + field.domain,
                                                   CODE_FIELD_NAME,
                                                   DESCRIPTION_FIELD_NAME)
                except:
                    #If path is feature dataset
                    arcpy.DomainToTable_management(
                        arcpy.Describe(desc.path).path, field.domain,
                        "in_memory/" + field.domain, CODE_FIELD_NAME,
                        DESCRIPTION_FIELD_NAME)

    with arcpy.da.UpdateCursor(inputFC, updatefields) as cursor:
        for row in cursor:
Code example #12
File: ValveTurnQC.py  Project: jzhengli/pugisQC
try:
	arcpy.env.workspace = RPUDWorkspace
	inFCName = valveToQC
	inFC = RPUDWorkspace + "/" + inFCName
	dsc = arcpy.Describe(inFCName)
	fields = dsc.fields
	fieldNames = [field.name for field in fields if field.name != dsc.OIDFieldName]
	diameterIdx = fieldNames.index("DIAMETER")
	turnsIdx = fieldNames.index("TURNSTOCLOSE")

	arcpy.env.workspace = fileDBWorkspace
	outFCName = "ValveTurnsQC"
	outFC = fileDBWorkspace + "/" + outFCName
	try:
		if arcpy.Exists(outFCName):
			arcpy.Delete_management(outFCName)
	except Exception:
		print ("Cannot get a lock, try close all ArcGIS windows")
		exit()
	arcpy.CreateFeatureclass_management(fileDBWorkspace, outFCName, "POINT", inFC, "DISABLED", "DISABLED", arcpy.Describe(inFC).spatialReference)
	# print arcpy.Describe(fileDBWorkspace + "/" + outFCName)

	arcpy.env.workspace = RPUDWorkspace

	whereClause = "DIAMETER IS NOT NULL AND TURNSTOCLOSE IS NOT NULL ORDER BY DIAMETER" # find all features with valid diameter and turns-to-close value
	with arcpy.da.SearchCursor(inFC, fieldNames, whereClause) as sCursor:
		with arcpy.da.InsertCursor(outFC, fieldNames) as iCursor:
			for row in sCursor:
				sigma = int((round(row[diameterIdx] * 0.25))) # custom range for verify turns to close based on diameter
				if row[diameterIdx] == 0:
					iCursor.insertRow(row)
Code example #13
            uCur.updateRow([arcpy.Polyline(partA), seg[1], seg[2]])
            seg = uCur.next()
    except StopIteration:
        return True


###******************* Part I *****************
#Step 0
arcpy.FeatureToLine_management(mupolygon, "MU_lines", "", "NO_ATTRIBUTES")
arcpy.Dissolve_management("MU_lines", "MU_lines_dis", "",\
                          "", "SINGLE_PART", "DISSOLVE_LINES")

arcpy.FeatureToPoint_management(mupolygon, "MU_point", "INSIDE")

arcpy.MakeFeatureLayer_management('MU_lines_dis', 'MU_lines_select')
arcpy.Delete_management('MU_lines_dis')
arcpy.Delete_management("MU_lines")
if query:
    arcpy.MakeFeatureLayer_management(mupolygon, 'MU_select', query)
    arcpy.SelectLayerByLocation_management("MU_lines_select","SHARE_A_LINE_SEGMENT_WITH",\
    'MU_select',"#","NEW_SELECTION")
    arcpy.SelectLayerByLocation_management("MU_lines_select","SHARE_A_LINE_SEGMENT_WITH",sapolygon,\
                                       '#',"REMOVE_FROM_SELECTION")
else:
    arcpy.SelectLayerByLocation_management("MU_lines_select","SHARE_A_LINE_SEGMENT_WITH",sapolygon,\
                                       '#',"NEW_SELECTION","INVERT")

arcpy.SmoothLine_cartography("MU_lines_select", "MU_lines_gen", "PAEK",\
                             "25 Meters","FIXED_CLOSED_ENDPOINT", "NO_CHECK")
arcpy.Generalize_edit("MU_lines_gen", "2 Meters")
arcpy.SelectLayerByAttribute_management("MU_lines_select", "SWITCH_SELECTION")
Code example #14
    u'RCNT_AREA', u'CORE_1'
]
for f in l:
    if arcpy.ListFields(outShape2, f):
        arcpy.DeleteField_management(outShape2, f)

#Send output to ArcMap
arcpy.SetParameterAsText(7, outShapefile)
params = arcpy.GetParameterInfo()

# Clean up temporary workspace
try:
    fl.CleanFiles(sWorkspace)
    #fl.CleanFiles(tWorkspace)
    #arcpy.Delete_management(tWorkspace,"")
    arcpy.Delete_management(sWorkspace, "")
except:
    pass
try:
    arcpy.Delete_management("in_memory")
except:
    pass

### Display normal result in messagebox
##from Tkinter import *
##import tkMessageBox
##root = Tk()
##root.withdraw()
##tkMessageBox.showinfo(
##    title="Housing Density", \
##    message="Target: " + str(result) + "% of area conserved as core habitat\n Overall density: " + str(density) + " " + outputUnits + " per house")
Code example #15
File: convert.py  Project: ndanielsen/arc-open
    def execute(self, parameters, messages):
        """Runs the script"""

        # Get the user's input
        fc = parameters[0].valueAsText
        field_mappings = parameters[1].valueAsText
        fields = parameters[1].valueAsText.split(';')
        fields.append('SHAPE@XY')
        output_dir = parameters[2].valueAsText
        output_name = parameters[3].valueAsText
        convert_to_wgs84 = self.toBool(parameters[4].valueAsText)
        convert_to_geojson = self.toBool(parameters[5].valueAsText)
        convert_to_kmz = self.toBool(parameters[6].valueAsText)
        convert_to_csv = self.toBool(parameters[7].valueAsText)
        convert_metadata = self.toBool(parameters[8].valueAsText)
        debug = self.toBool(parameters[9].valueAsText)

        # Setup vars
        output_path = output_dir + '\\' + output_name
        shp_output_path = output_dir + '\\shapefile'
        shp_temp_output_path = output_dir + '\\shapefile\\temp\\'
        shapefile = shp_output_path + '\\' + output_name + '.shp'
        temp_shapefile = shp_output_path + '\\temp\\' + output_name + '.shp'

        if debug:
            AddMessage('Field infos:')
            AddMessage(field_mappings)

        try:
            arcpy.Delete_management('temp_layer')
        except:
            if debug:
                AddMessage('Did not have a temp_layer feature ' +
                           'class to delete')

        if not os.path.exists(shp_output_path):
            os.makedirs(shp_output_path)
            if debug:
                AddMessage('Created directory ' + shp_output_path)

        if not os.path.exists(shp_temp_output_path):
            os.makedirs(shp_temp_output_path)
        else:
            for file in os.listdir(shp_temp_output_path):
                file_path = os.path.join(shp_temp_output_path, file)
                try:
                    if os.path.isfile(file_path):
                        os.unlink(file_path)
                except:
                    AddWarning('Unable to delete ' + file +
                               'from the temp folder. This ' +
                               'may become a problem later')
                    pass

        arcpy.MakeFeatureLayer_management(fc, 'temp_layer', '', '',
                                          field_mappings)
        arcpy.CopyFeatures_management('temp_layer', temp_shapefile)

        if convert_to_wgs84:
            AddMessage('Converting spatial reference to WGS84...')
            arcpy.Project_management(
                temp_shapefile, shapefile,
                "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433],METADATA['World',-180.0,-90.0,180.0,90.0,0.0,0.0174532925199433,0.0,1262]]",
                "WGS_1984_(ITRF00)_To_NAD_1983",
                "PROJCS['NAD_1983_StatePlane_Pennsylvania_South_FIPS_3702_Feet',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Lambert_Conformal_Conic'],PARAMETER['False_Easting',1968500.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-77.75],PARAMETER['Standard_Parallel_1',39.93333333333333],PARAMETER['Standard_Parallel_2',40.96666666666667],PARAMETER['Latitude_Of_Origin',39.33333333333334],UNIT['Foot_US',0.3048006096012192]]"
            )
            AddMessage('Projection conversion completed.')
        else:
            AddMessage('Exporting shapefile already in WGS84...')
            arcpy.FeatureClassToShapefile_conversion(temp_shapefile,
                                                     shp_output_path)

        try:
            arcpy.Delete_management('temp_layer')
        except:
            AddError('Unable to delete in_memory feature class')

        AddMessage('Compressing the shapefile to a .zip file...')

        export = Export(output_dir, output_name, debug)

        zip = export.zip()
        if zip:
            AddMessage('Finished creating ZIP archive')

        if convert_to_geojson:
            AddMessage('Converting to GeoJSON...')
            output = output_path + '.geojson'
            geojson = esri2open.toOpen(shapefile,
                                       output,
                                       includeGeometry='geojson')
            if geojson:
                AddMessage('Finished converting to GeoJSON')

        if convert_to_kmz:
            AddMessage('Converting to KML...')
            kmz = export.kmz()
            if kmz:
                AddMessage('Finished converting to KMZ')

        if convert_to_csv:
            AddMessage('Converting to CSV...')
            csv = export.csv()
            if csv:
                AddMessage('Finished converting to CSV')

        if convert_metadata:
            AddMessage('Converting metadata to Markdown ' +
                       'README.md file...')
            md = export.md()
            if md:
                AddMessage('Finished converting metadata to ' +
                           'Markdown README.md file')

        # Delete the /temp directory because we're done with it
        shutil.rmtree(shp_output_path + '\\temp')
        if (debug):
            AddMessage('Deleted the /temp folder because we don\'t' +
                       ' need it anymore')

        return
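
The bare try/except around Delete_management at the top of execute() silently swallows any error; an existence check expresses the intent more directly. A sketch:

#Sketch: only delete the layer if it actually exists
if arcpy.Exists('temp_layer'):
    arcpy.Delete_management('temp_layer')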
Code example #16
                elif end_points == "END":
                    insert.insertRow((end, oid, length))

                elif end_points == "BOTH":
                    insert.insertRow((start, oid, 0))
                    insert.insertRow((end, oid, length))

                arcpy.SetProgressorPosition()

            except Exception as e:
                arcpy.AddMessage(str(e.message))

                ################################################################

line_keyfield = str(arcpy.ListFields(polyline, "", "OID")[0].name)

mem_point_fl = arcpy.MakeFeatureLayer_management(mem_point, "Points_memory")

arcpy.AddJoin_management(mem_point_fl, "LineOID", polyline, line_keyfield)

if "in_memory" in output:
    arcpy.SetParameter(8, mem_point_fl)

else:
    arcpy.CopyFeatures_management(mem_point_fl, output)

    arcpy.Delete_management(mem_point)
    arcpy.Delete_management(mem_point_fl)

arcpy.ResetProgressor()
arcpy.GetMessages()
Code example #17
def parks_reps(gdb, uttl, parks, epsg, pixel, config, id_join):

    results_folder = os.path.dirname(gdb)
    temp_folder = os.path.join(results_folder, 'temp')

    arcpy.env.workspace = results_folder
    arcpy.env.overwriteOutput = True

    xls_file = pd.ExcelFile(config)
    df_criteria = xls_file.parse('Representatividad', index_col='Clases')

    if arcpy.Exists(os.path.join(temp_folder, 'Shape_Reps_Reprojected.shp')):
        arcpy.Delete_management(
            os.path.join(temp_folder, 'Shape_Reps_Reprojected.shp'))

    if arcpy.Exists(os.path.join(gdb, 'Reps_Table')):
        arcpy.Delete_management(os.path.join(gdb, 'Reps_Table'))
        arcpy.DeleteField_management(
            uttl, [u'RUNAP', u'Reps_Value_Calc', u'Reps_Value', u'Reps_Clase'])

    if arcpy.Exists(os.path.join(temp_folder, 'UTTL_Basins.shp')):
        arcpy.Delete_management(os.path.join(temp_folder, 'UTTL_Basins.shp'))

    if arcpy.Exists(os.path.join(temp_folder, 'RUNAP_Table.dbf')):
        arcpy.Delete_management(os.path.join(temp_folder, 'RUNAP_Table.dbf'))

    arcpy.Project_management(in_dataset=parks,
                             out_dataset=os.path.join(
                                 temp_folder, 'Shape_Reps_Reprojected.shp'),
                             out_coor_system=epsg)

    arcpy.FeatureToRaster_conversion(
        os.path.join(temp_folder, 'Shape_Reps_Reprojected.shp'), "OBJECTID",
        os.path.join(temp_folder, 'RUNAP.tif'), pixel)

    arcpy.MakeFeatureLayer_management(uttl, 'UTTL')
    arcpy.CopyFeatures_management(
        'UTTL', os.path.join(temp_folder, r'UTTL_Basins.shp'))
    arcpy.gp.ZonalStatisticsAsTable_sa(
        os.path.join(temp_folder, 'UTTL_Basins.shp'), id_join,
        os.path.join(temp_folder, 'RUNAP.tif'),
        os.path.join(temp_folder, 'RUNAP_Table.dbf'), "DATA", "MEAN")
    table2csv(os.path.join(temp_folder, 'RUNAP_Table.dbf'),
              os.path.join(temp_folder, 'RUNAP_Table.csv'))
    table2csv(uttl, os.path.join(temp_folder, 'UTTL_Table_Areas.csv'))

    df_runap = pd.DataFrame(
        pd.read_csv(os.path.join(temp_folder, 'RUNAP_Table.csv'),
                    index_col=id_join)['AREA'])
    df_uttl = pd.DataFrame(
        pd.read_csv(os.path.join(temp_folder, 'UTTL_Table_Areas.csv'),
                    index_col=id_join)['Shape_Area'])

    df_uttl['RUNAP'] = 0.0
    df_uttl.ix[df_runap.index, 'RUNAP'] = df_runap['AREA']
    df_uttl['Reps_Value_Calc'] = (df_uttl['RUNAP'] /
                                  df_uttl['Shape_Area']) * 100.

    df_uttl['Reps_Value'] = df_criteria.ix['No RP', 'Value']
    df_uttl['Reps_Clase'] = 'No Representativo'

    df_index_query = df_uttl[
        (df_uttl['Reps_Value_Calc'] > df_criteria.ix['Baja', 'Inferior'])
        & (df_uttl['Reps_Value_Calc'] <= df_criteria.ix['Baja',
                                                        'Superior'])].index
    df_uttl.ix[df_index_query, 'Reps_Value'] = df_criteria.ix['Baja', 'Value']
    df_uttl.ix[df_index_query, 'Reps_Clase'] = 'Baja'

    df_index_query = df_uttl[
        (df_uttl['Reps_Value_Calc'] > df_criteria.ix['Media', 'Inferior'])
        & (df_uttl['Reps_Value_Calc'] <= df_criteria.ix['Media',
                                                        'Superior'])].index
    df_uttl.ix[df_index_query, 'Reps_Value'] = df_criteria.ix['Media', 'Value']
    df_uttl.ix[df_index_query, 'Reps_Clase'] = 'Media'

    df_index_query = df_uttl[
        (df_uttl['Reps_Value_Calc'] > df_criteria.ix['Alta', 'Inferior'])
        & (df_uttl['Reps_Value_Calc'] <= df_criteria.ix['Alta',
                                                        'Superior'])].index
    df_uttl.ix[df_index_query, 'Reps_Value'] = df_criteria.ix['Alta', 'Value']
    df_uttl.ix[df_index_query, 'Reps_Clase'] = 'Alta'

    df_index_query = df_uttl[
        (df_uttl['Reps_Value_Calc'] > df_criteria.ix['Muy Alta', 'Inferior'])
        & (df_uttl['Reps_Value_Calc'] <= df_criteria.ix['Muy Alta',
                                                        'Superior'])].index
    df_uttl.ix[df_index_query, 'Reps_Value'] = df_criteria.ix['Muy Alta',
                                                              'Value']
    df_uttl.ix[df_index_query, 'Reps_Clase'] = 'Muy Alta'

    df_join = df_uttl[[
        u'RUNAP', u'Reps_Value_Calc', u'Reps_Value', u'Reps_Clase'
    ]].copy()
    df_join.to_csv(os.path.join(temp_folder, 'Reps_Table_Join.csv'))

    arcpy.TableToTable_conversion(
        os.path.join(temp_folder, 'Reps_Table_Join.csv'), gdb, 'Reps_Table')

    expression = 'str(!Name!)'
    code_block = ''
    arcpy.AddField_management(os.path.join(gdb, 'Reps_Table'), 'Code', 'TEXT',
                              '', '', '10', '', 'NULLABLE', 'NON_REQUIRED', '')
    arcpy.CalculateField_management(os.path.join(gdb, 'Reps_Table'), 'Code',
                                    expression, 'PYTHON', code_block)

    arcpy.MakeFeatureLayer_management(uttl, 'UTTL')
    arcpy.AddJoin_management('UTTL', 'Name', os.path.join(gdb, 'Reps_Table'),
                             'Code')

    arcpy.CopyFeatures_management('UTTL', os.path.join(gdb,
                                                       r'UTTL_Basins_Reps'))
    arcpy.Delete_management('UTTL')
    arcpy.Delete_management(uttl)

    arcpy.Rename_management(os.path.join(gdb, r'UTTL_Basins_Reps'), uttl)
    clear_layers()
    rename_fields(os.path.join(gdb, r'UTTL_Basins'))
    rename_fields(os.path.join(gdb, r'UTTL_Basins'), r'Reps_Table')

    base_name = ['Reps_Table_OBJECTID', 'Reps_Table_Name', 'Code']
    arcpy.DeleteField_management(uttl, base_name)
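
# A minimal sketch of the same Reps banding done with pandas.cut instead of the
# four index queries above; it assumes df_criteria keeps the 'Inferior',
# 'Superior' and 'Value' columns used in parks_reps and is not part of the
# original module.
import pandas as pd

def classify_reps(df_uttl, df_criteria):
    classes = ['Baja', 'Media', 'Alta', 'Muy Alta']
    # One bin edge per class boundary; values at or below the lowest 'Inferior'
    # fall outside the bins and remain 'No Representativo'.
    edges = [df_criteria.loc['Baja', 'Inferior']] + \
            [df_criteria.loc[c, 'Superior'] for c in classes]
    banded = pd.cut(df_uttl['Reps_Value_Calc'], bins=edges, labels=classes)
    df_uttl['Reps_Clase'] = banded.astype(object).fillna('No Representativo')
    value_map = {c: df_criteria.loc[c, 'Value'] for c in classes}
    value_map['No Representativo'] = df_criteria.loc['No RP', 'Value']
    df_uttl['Reps_Value'] = df_uttl['Reps_Clase'].map(value_map)
    return df_uttl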
コード例 #18
0
def lwf_process(OrdSurv_Grid, file_loc, exports, scratch_gdb):

    print("selecting the relevant area to clip")

    ord_grid_fl = arcpy.MakeFeatureLayer_management(OrdSurv_Grid, "lay_selec",
                                                    "", r"in_memory")

    with arcpy.da.SearchCursor(ord_grid_fl, ["GRIDSQ"]) as cursor:
        for row in cursor:
            # if row[0] == 'SX':
            arcpy.env.extent = 'MINOF'
            grid_area = row[0]
            print(grid_area)

            os_grid_fold = os.path.join(exports, grid_area.lower())
            if os.path.exists(os_grid_fold):
                print("OS Grid folder already exists")
            else:
                print("create OS Grid folder folder")
                os.makedirs(os_grid_fold)

            print('select features within Ordnance Survey grid area')
            # expr = ('GRIDSQ = {0}'.format(row[0]))
            expr = """{0} = '{1}'""".format('GRIDSQ', row[0])
            print(expr)

            arcpy.SelectLayerByAttribute_management(ord_grid_fl,
                                                    "NEW_SELECTION", expr)
            temp_zone = r"in_memory/OS_tempZone"
            OS_clip = arcpy.CopyFeatures_management(ord_grid_fl, temp_zone)

            work_area = arcpy.Describe(OS_clip)
            work_ext = work_area.extent
            arcpy.env.extent = work_ext

            # clip_file = r"in_memory/lwf_clip"
            clip_file = os.path.join(scratch_gdb, "lwf_clip")
            print("clipping features to Grid area")
            arcpy.Clip_analysis(file_loc,
                                OS_clip,
                                clip_file,
                                cluster_tolerance=0)
            obj_len = int(str(arcpy.GetCount_management(clip_file)))
            # print(obj_len)
            if obj_len > 0:

                arcpy.AddField_management(clip_file,
                                          field_name='BVI_Val',
                                          field_type='SHORT')
                cursor2 = arcpy.da.UpdateCursor(clip_file, ['BVI_Val'])
                for row2 in cursor2:
                    row2[0] = 4
                    cursor2.updateRow(row2)

                print("creating snap raster")
                # temp_ras = r"in_memory/temp_ras"
                temp_ras = os.path.join(scratch_gdb, "temp_ras")
                arcpy.PolygonToRaster_conversion(in_features=OS_clip,
                                                 value_field='GRIDSQ',
                                                 out_rasterdataset=temp_ras,
                                                 cell_assignment="CELL_CENTER",
                                                 cellsize=5)
                work_area = arcpy.Describe(temp_ras)
                work_ext = work_area.extent
                arcpy.env.extent = work_ext

                arcpy.env.cellsize = temp_ras
                arcpy.env.snapRaster = temp_ras

                # lwf_out_tmp = r"in_memory/lwf_ras_tmp"
                lwf_out_tmp = os.path.join(scratch_gdb, "lwf_ras_tmp")

                print("converting features to raster")
                arcpy.FeatureToRaster_conversion(clip_file, "BVI_Val",
                                                 lwf_out_tmp, 5)

                lwf_out = os.path.join(os_grid_fold,
                                       grid_area + '_lwf_ras.tif')

                print("converting null to zero")
                # arcpy.env.extent = 'MAXOF'
                lwf_ras = Con(IsNull(lwf_out_tmp), 0, lwf_out_tmp)
                # lwf_ras = Con(IsNull(lwf_out_tmp),0,Con((Raster(lwf_out_tmp) > Raster(temp_ras)), Raster(lwf_out_tmp), 0)) # try this is might solve things...
                lwf_ras.save(lwf_out)

                arcpy.Delete_management(r"in_memory")
                arcpy.ClearEnvironment("extent")
                arcpy.ClearEnvironment("snapRaster")
                arcpy.ClearEnvironment("cellsize")
            else:
                print("no features in OS GRID {0}".format(grid_area))


# if __name__ == '__main__':
#     lwf_main(sys.argv[0], sys.argv[1], sys.argv[2], sys.argv[3])
コード例 #19
0
ファイル: mosaic.py プロジェクト: voyagersearch/voyager-py
def execute(request):
    """Mosaics input raster datasets into a new raster dataset.
    :param request: json as a dict.
    """
    parameters = request['params']
    out_coordinate_system = task_utils.get_parameter_value(
        parameters, 'output_projection', 'code')
    # Advanced options
    output_raster_format = task_utils.get_parameter_value(
        parameters, 'raster_format', 'value')
    compression_method = task_utils.get_parameter_value(
        parameters, 'compression_method', 'value')
    compression_quality = task_utils.get_parameter_value(
        parameters, 'compression_quality', 'value')
    output_file_name = task_utils.get_parameter_value(parameters,
                                                      'output_file_name',
                                                      'value')
    if not output_file_name:
        output_file_name = 'output'
    arcpy.env.compression = '{0} {1}'.format(compression_method,
                                             compression_quality)

    clip_area = None
    if not output_raster_format == 'MosaicDataset':
        # Get the clip region as an extent object.
        try:
            clip_area_wkt = task_utils.get_parameter_value(
                parameters, 'processing_extent', 'wkt')
            if not clip_area_wkt:
                clip_area_wkt = 'POLYGON ((-180 -90, -180 90, 180 90, 180 -90, -180 -90))'
            if not out_coordinate_system == '0':
                clip_area = task_utils.get_clip_region(clip_area_wkt,
                                                       out_coordinate_system)
            else:
                clip_area = task_utils.get_clip_region(clip_area_wkt)
        except KeyError:
            pass

    status_writer.send_status(_('Setting the output workspace...'))
    out_workspace = os.path.join(request['folder'], 'temp')
    if not os.path.exists(out_workspace):
        os.makedirs(out_workspace)
    if output_raster_format == 'FileGDB' or output_raster_format == 'MosaicDataset':
        out_workspace = arcpy.CreateFileGDB_management(
            out_workspace, 'output.gdb').getOutput(0)
    arcpy.env.workspace = out_workspace

    status_writer.send_status(_('Starting to process...'))
    num_results, response_index = task_utils.get_result_count(parameters)
    raster_items = None
    if num_results > task_utils.CHUNK_SIZE:
        # Query the index for results in groups of 25.
        query_index = task_utils.QueryIndex(parameters[response_index])
        fl = query_index.fl
        query = '{0}{1}{2}'.format(sys.argv[2].split('=')[1],
                                   '/select?&wt=json', fl)
        fq = query_index.get_fq()
        if fq:
            groups = task_utils.grouper(range(0, num_results),
                                        task_utils.CHUNK_SIZE, '')
            query += fq
        elif 'ids' in parameters[response_index]:
            groups = task_utils.grouper(
                list(parameters[response_index]['ids']), task_utils.CHUNK_SIZE,
                '')
        else:
            groups = task_utils.grouper(range(0, num_results),
                                        task_utils.CHUNK_SIZE, '')

        headers = {
            'x-access-token': task_utils.get_security_token(request['owner'])
        }
        for group in groups:
            if fq:
                results = requests.get(query + "&rows={0}&start={1}".format(
                    task_utils.CHUNK_SIZE, group[0]),
                                       verify=verify_ssl,
                                       headers=headers)
            elif 'ids' in parameters[response_index]:
                results = requests.get(
                    query + '{0}&ids={1}'.format(fl, ','.join(group)),
                    verify=verify_ssl,
                    headers=headers)
            else:
                results = requests.get(query + "&rows={0}&start={1}".format(
                    task_utils.CHUNK_SIZE, group[0]),
                                       verify=verify_ssl,
                                       headers=headers)

            input_items = task_utils.get_input_items(
                results.json()['response']['docs'])
            if not input_items:
                input_items = task_utils.get_input_items(
                    parameters[response_index]['response']['docs'])
            raster_items, pixels, bands, skipped = get_items(input_items)
    else:
        input_items = task_utils.get_input_items(
            parameters[response_index]['response']['docs'])
        raster_items, pixels, bands, skipped = get_items(input_items)

    if not raster_items:
        if skipped == 0:
            status_writer.send_state(status.STAT_FAILED,
                                     _('Invalid input types'))
            skipped_reasons['All Items'] = _('Invalid input types')
            task_utils.report(os.path.join(request['folder'], '__report.json'),
                              len(raster_items),
                              num_results,
                              skipped_details=skipped_reasons)
            return
        else:
            status_writer.send_state(
                status.STAT_WARNING,
                _('{0} results could not be processed').format(skipped))
            task_utils.report(os.path.join(request['folder'], '__report.json'),
                              len(raster_items),
                              skipped,
                              skipped_details=skipped_reasons)
            return

    # Get most common pixel type.
    pixel_type = pixel_types[max(set(pixels), key=pixels.count)]
    if output_raster_format in ('FileGDB', 'GRID', 'MosaicDataset'):
        output_name = arcpy.ValidateTableName('mosaic', out_workspace)
    else:
        output_name = '{0}.{1}'.format(
            arcpy.ValidateTableName('mosaic', out_workspace)[:9],
            output_raster_format.lower())
        status_writer.send_status(output_name)

    if output_raster_format == 'MosaicDataset':
        try:
            status_writer.send_status(
                _('Generating {0}. Large input {1} will take longer to process.'
                  .format('Mosaic', 'rasters')))
            if out_coordinate_system == '0':
                out_coordinate_system = raster_items[0]
            else:
                out_coordinate_system = None
            mosaic_ds = arcpy.CreateMosaicDataset_management(
                out_workspace, output_name, out_coordinate_system, max(bands),
                pixel_type)
            arcpy.AddRastersToMosaicDataset_management(mosaic_ds,
                                                       'Raster Dataset',
                                                       raster_items)
            arcpy.MakeMosaicLayer_management(mosaic_ds, 'mosaic_layer')
            layer_object = arcpy.mapping.Layer('mosaic_layer')
            task_utils.make_thumbnail(
                layer_object, os.path.join(request['folder'], '_thumb.png'))
        except arcpy.ExecuteError:
            status_writer.send_state(status.STAT_FAILED, arcpy.GetMessages(2))
            return
    else:
        try:
            if len(bands) > 1:
                status_writer.send_state(
                    status.STAT_FAILED,
                    _('Input rasters must have the same number of bands'))
                return
            status_writer.send_status(
                _('Generating {0}. Large input {1} will take longer to process.'
                  .format('Mosaic', 'rasters')))
            if out_coordinate_system == '0':
                out_coordinate_system = None
            if clip_area:
                ext = '{0} {1} {2} {3}'.format(clip_area.XMin, clip_area.YMin,
                                               clip_area.XMax, clip_area.YMax)
                tmp_mosaic = arcpy.MosaicToNewRaster_management(
                    raster_items,
                    out_workspace,
                    'tm',
                    out_coordinate_system,
                    pixel_type,
                    number_of_bands=bands.keys()[0])
                status_writer.send_status(_('Clipping...'))
                out_mosaic = arcpy.Clip_management(tmp_mosaic, ext,
                                                   output_name)
                arcpy.Delete_management(tmp_mosaic)
            else:
                out_mosaic = arcpy.MosaicToNewRaster_management(
                    raster_items,
                    out_workspace,
                    output_name,
                    out_coordinate_system,
                    pixel_type,
                    number_of_bands=bands.keys()[0])
            arcpy.MakeRasterLayer_management(out_mosaic, 'mosaic_layer')
            layer_object = arcpy.mapping.Layer('mosaic_layer')
            task_utils.make_thumbnail(
                layer_object, os.path.join(request['folder'], '_thumb.png'))
        except arcpy.ExecuteError:
            status_writer.send_state(status.STAT_FAILED, arcpy.GetMessages(2))
            return

    if arcpy.env.workspace.endswith('.gdb'):
        out_workspace = os.path.dirname(arcpy.env.workspace)
    zip_file = task_utils.zip_data(out_workspace,
                                   '{0}.zip'.format(output_file_name))
    shutil.move(
        zip_file,
        os.path.join(os.path.dirname(out_workspace),
                     os.path.basename(zip_file)))

    # Update state if necessary.
    if skipped > 0:
        status_writer.send_state(
            status.STAT_WARNING,
            _('{0} results could not be processed').format(skipped))
    task_utils.report(os.path.join(request['folder'], '__report.json'),
                      len(raster_items),
                      skipped,
                      skipped_details=skipped_reasons)
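
# A minimal, self-contained sketch of the chunked-ID batching that
# task_utils.grouper performs above; this is the standard itertools recipe,
# not the actual voyagersearch implementation.
from itertools import izip_longest  # Python 2; use itertools.zip_longest on Python 3

def grouper(iterable, chunk_size, fillvalue=''):
    # Collect items into fixed-length chunks, padding the last chunk with fillvalue.
    args = [iter(iterable)] * chunk_size
    return izip_longest(fillvalue=fillvalue, *args)

# Example: grouper(range(0, 7), 3, '') yields (0, 1, 2), (3, 4, 5), (6, '', '')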
コード例 #20
0
    # Make sure that the 'thematic' folder exists.
    if not arcpy.Exists(thematicPath):
        # make sure the thematic folder exists.
        raise MyError, "Thematic data path " + thematicPath + " does not exist"

    # Create file geodatabase in base folder
    outputGDB = os.path.join(basePath, "WSS_ThematicMaps.gdb")
    if not arcpy.Exists(outputGDB):
        arcpy.CreateFileGDB_management(os.path.dirname(outputGDB),
                                       os.path.basename(outputGDB), "10.0")

    # Copy original shapefile to a filegeodatabase featureclass
    outputFC = os.path.join(outputGDB, inputShp)

    if arcpy.Exists(outputFC):
        arcpy.Delete_management(outputFC)

    # Seem to be having problems copying polygons when 0 are selected
    # Try switching selection (unless there already is one)
    iSel = int(arcpy.GetCount_management(inputShp).getOutput(0))
    if iSel == 0:
        arcpy.SelectLayerByAttribute_management(inputShp, "SWITCH_SELECTION")

    arcpy.CopyFeatures_management(inputShp, outputFC)

    if iSel == 0:
        arcpy.SelectLayerByAttribute_management(inputShp, "CLEAR_SELECTION")

    # Create layer file from input shapefile
    wssLayerFile = os.path.join(env.scratchFolder, "wss_thematicmap.lyr")
    arcpy.SaveToLayerFile_management(inputShp, wssLayerFile, "ABSOLUTE", 10.1)
コード例 #21
0
####import modules needed by this block
import arcpy
import os
from arcpy import env
from arcpy.sa import *
arcpy.CheckOutExtension("Spatial")

####set local variables
env.workspace = "D://data"
mask = "D://data/mask.shp"
mxd = arcpy.mapping.MapDocument("D://data/mapname.mxd")

####set up iteration
for lyr in arcpy.mapping.ListLayers(mxd, "*wildcard*"):
    print("Converting kriging layer to raster")
    # GALayerToGrid_ga needs a numeric cell size; 1 is a placeholder value
    GALayer = arcpy.GALayerToGrid_ga(lyr, r"B:\Arc Tools\trial\d1", 1, "1", "1")
    print("Extracting by mask")
    EM = ExtractByMask(GALayer, mask)
    EMsave = os.path.join(env.workspace, lyr.name)
    EM.save(EMsave)
    print("Deleting intermediate data")
    arcpy.Delete_management(r"B:\Arc Tools\trial\d1")

	
####rasters####
####iterative extract by mask with 1 raster and multiple masks####

####import local environments
import arcpy
from arcpy import env
import os
from arcpy.sa import *

####set local variables
env.workspace="D://data"
raster="D://data/raster"
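
####a minimal sketch of the loop described by the heading above (one raster,
####several mask shapefiles); the masks folder and output naming are
####assumptions, not part of the original script
arcpy.CheckOutExtension("Spatial")
mask_folder = "D://data/masks"
env.workspace = mask_folder
for mask_shp in arcpy.ListFeatureClasses("*.shp"):
    print("Extracting raster by mask: " + mask_shp)
    extracted = ExtractByMask(raster, os.path.join(mask_folder, mask_shp))
    out_name = os.path.splitext(mask_shp)[0] + "_extract"
    extracted.save(os.path.join("D://data", out_name))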
コード例 #22
0
replaceExisting = False
dir = os.getcwd()

## IMPORTANT- Review Lines: 11,12, 14, 29, 46

# Incidents
Master_I = r"G:\RestrictedAccess\CoreData\NCIS\LITS_20200729\NCIS.gdb\Incidents_xy"
Master_R = r"G:\RestrictedAccess\CoreData\NCIS\LITS_20200729\NCIS.gdb\Residence_xy"

gdbC = 'ClusterAnalyses_08_17.gdb'
workingGDB = os.path.join(dir, gdbC)
    
# if the gdb exists and replaceExisting evaluates to true
# delete the existing database
if arcpy.Exists(workingGDB) and replaceExisting:
   arcpy.Delete_management(workingGDB)
    
   # if the gdb was deleted or did not already exists
   # create the gdb
if not arcpy.Exists(workingGDB):
   arcpy.CreateFileGDB_management(dir, gdbC)
else:
   print('The output database already exists: ' + gdbC)
   i = input('Delete it 1st or change replaceExisting to True! ')
   sys.exit('Exiting')

gdbL = 'LITS_20200729_08_17.gdb'
workingGDB = os.path.join(dir, gdbL)
    
# if the gdb exists and replaceExisting evaluates to true
# delete the existing database
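
# A minimal sketch wrapping the create-or-replace pattern above in a reusable
# helper; 'ensure_gdb' is an illustration, not part of the original script.
def ensure_gdb(folder, gdb_name, replace_existing=False):
    gdb_path = os.path.join(folder, gdb_name)
    # Drop the existing geodatabase only when the caller asks for a rebuild.
    if arcpy.Exists(gdb_path) and replace_existing:
        arcpy.Delete_management(gdb_path)
    if not arcpy.Exists(gdb_path):
        arcpy.CreateFileGDB_management(folder, gdb_name)
    return gdb_path

# e.g. workingGDB = ensure_gdb(dir, gdbL, replaceExisting)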
コード例 #23
0
        if fld_fro in flds: 
            arcpy.DeleteField_management(sp_buf, fld)

print('  repairing (%s)' % (time.strftime('%H:%M:%S')))
arcpy.RepairGeometry_management(sp_buf)

print('  fixing Svalbard (%s)' % (time.strftime('%H:%M:%S')))
arcpy.MakeFeatureLayer_management('%s/%s' % (gdb, sp_buf), 'lyr', '"sp_name"=\'Svalbard\'')
arcpy.CalculateField_management('lyr', 'sp_id', '253', 'PYTHON_9.3')
arcpy.CalculateField_management('lyr', 'sp_key', '"SVA"', 'PYTHON_9.3')

print('  erasing erroneous mid-EEZ land buffer (%s)' % (time.strftime('%H:%M:%S')))
arcpy.Erase_analysis('%s/%s' % (gdb, sp_buf), '%s/%s' % (gdb, 'sp_landfix_buf60km'), '%s/%s' % (gdb, 'sp_%s_e' % buf))

# convert any NULL rows to Canada
if arcpy.Exists('lyr'): arcpy.Delete_management('lyr')
arcpy.MakeFeatureLayer_management('%s/sp_%s_e' % (gdb, buf), 'lyr', '"sp_name" IS NULL OR "sp_id" IS NULL')
n = int(arcpy.GetCount_management('lyr').getOutput(0))
if (n > 0):
    print '  WARNING!: %s has %d rows where sp_name is NULL. Presuming Canada.' % (sp_buf, n)   

    arcpy.CalculateField_management('lyr',  'sp_name', "'Canada'", 'PYTHON_9.3')
    arcpy.Delete_management('lyr')
    arcpy.MakeFeatureLayer_management('%s/sp_%s_e' % (gdb, buf), 'lyr', '"sp_name" = \'Canada\'')
    # update field values to Canada
    for fld, val in dict_CAN.iteritems():
        print '    ',fld, val
        if type(val) is str:
            val_str = "'%s'" % val
        else:
            val_str = '%g' % val
コード例 #24
0
def make_time_commodity_maps(mxd, image_name, the_scenario, logger):

    scenario_gdb = the_scenario.main_gdb

    # Check whether the route segment layer has any data;
    # currently we only produce maps when there is route data
    if arcpy.Exists("route_segments_lyr"):
        arcpy.Delete_management("route_segments_lyr")
    arcpy.MakeFeatureLayer_management(os.path.join(scenario_gdb, 'optimized_route_segments'),
                                      "route_segments_lyr", "Include_Map = 1")
    result = arcpy.GetCount_management("route_segments_lyr")
    count = int(result.getOutput(0))

    if count > 0:

        # reset the map so we are working from a clean and known starting point.
        reset_map_base_layers(mxd, logger)

        # get a dictionary of all the layers in the mxd
        # might want to get a list of groups
        layer_dictionary = get_layer_dictionary(mxd, logger)

        # create a variable for each layer of interest in the time and commodity mapping
        # so we can access each layer easily
        time_commodity_segments_lyr = layer_dictionary["TIME_COMMODITY"]
        time_commodity_aggregated_lyr = layer_dictionary["TIME_COMMODITY_AGGREGATED"]
        time_commodity_aggregated_w_facilities_lyr = layer_dictionary["TIME_COMMODITY_AGGREGATED_W_FACILITIES"]

        # START MAKING THE MAPS!

        # Establish definition queries to define the subset for each layer,
        # turn off if there are no features for that particular subset.
        for groupLayer in [time_commodity_segments_lyr, time_commodity_aggregated_lyr,
                           time_commodity_aggregated_w_facilities_lyr]:
            for subLayer in groupLayer:
                if subLayer.supports("DATASOURCE"):
                    if subLayer.dataSource == os.path.join(scenario_gdb, 'optimized_route_segments'):
                        subLayer.definitionQuery = "Include_Map = 1"

                    if subLayer.dataSource == os.path.join(scenario_gdb, 'raw_material_producers'):
                        subLayer.definitionQuery = "Include_Map = 1"
                        rmp_count = get_feature_count(subLayer, logger)

                    if subLayer.dataSource == os.path.join(scenario_gdb, 'processors'):
                        subLayer.definitionQuery = "Include_Map = 1"
                        proc_count = get_feature_count(subLayer, logger)

                    if subLayer.dataSource == os.path.join(scenario_gdb, 'ultimate_destinations'):
                        subLayer.definitionQuery = "Include_Map = 1"
                        dest_count = get_feature_count(subLayer, logger)

        # Actually export map to file
        time_commodity_segments_lyr.visible = True
        for subLayer in time_commodity_segments_lyr:
            subLayer.visible = True
        caption = ""
        generate_map(caption, image_name, mxd, the_scenario, logger)

        time_commodity_aggregated_lyr.visible = True
        for subLayer in time_commodity_aggregated_lyr:
            subLayer.visible = True
        caption = ""
        image_name = image_name + "_aggregate"
        generate_map(caption, image_name, mxd, the_scenario, logger)

        time_commodity_aggregated_w_facilities_lyr.visible = True
        for subLayer in time_commodity_aggregated_w_facilities_lyr:
            subLayer.visible = True
        caption = ""
        image_name = image_name + "_aggregate_w_facilities"
        generate_map(caption, image_name, mxd, the_scenario, logger)

        # Clean up mxd
        del mxd

    # No mapping if there are no routes
    else:
        logger.info("no routes for this combination of time steps and commodities... skipping mapping...")
コード例 #25
0
                     " to analysis grid...(" + str(thisPlanEnt) + "/" +
                     str(len(userTab) - 1) + ")")

    fromShp = userTab[thisPlanEnt][0]
    fields = userTab[thisPlanEnt][1]
    fromGeometry = userTab[thisPlanEnt][2]
    outShp = os.path.dirname(analGrid) + "\\" + os.path.basename(
        fromShp).split(".")[0] + "_intermediate" + str(thisPlanEnt) + ".shp"

    # make list from input
    fieldsOrig = fields
    fields = fields.split(";")

    # find and copy analysis grid
    if arcpy.Exists(attGrid):
        arcpy.Delete_management(attGrid)
    arcpy.Copy_management(analGrid, attGrid)
    if arcpy.Exists(outShp):
        arcpy.Delete_management(outShp)
    arcpy.Copy_management(analGrid, outShp)

    if fromGeometry != "Point":

        # copy origin data
        fromShp_int = os.path.dirname(fromShp) + "/" + os.path.basename(
            fromShp).split(".")[0] + "_intermediate.shp"
        if arcpy.Exists(fromShp_int):
            arcpy.Delete_management(fromShp_int)
        arcpy.Copy_management(fromShp, fromShp_int)
        fromShp = fromShp_int
コード例 #26
0
def dissolve_optimal_route_segments_feature_class_for_commodity_mapping(layer_name, sql_where_clause, the_scenario,
                                                                        logger):

    # Make a dissolved version of fc for mapping aggregate flows
    logger.info("start: dissolve_optimal_route_segments_feature_class_for_commodity_mapping")

    scenario_gdb = the_scenario.main_gdb

    arcpy.env.workspace = scenario_gdb

    arcpy.MakeFeatureLayer_management("optimized_route_segments", "optimized_route_segments_lyr")
    arcpy.SelectLayerByAttribute_management(in_layer_or_view="optimized_route_segments_lyr",
                                            selection_type="NEW_SELECTION", where_clause=sql_where_clause)

    if arcpy.Exists("optimized_route_segments_dissolved_commodity"):
        arcpy.Delete_management("optimized_route_segments_dissolved_commodity")

    if arcpy.Exists("optimized_route_segments_dissolved_" + layer_name):
        arcpy.Delete_management("optimized_route_segments_dissolved_" + layer_name)

    # Dissolve
    arcpy.Dissolve_management("optimized_route_segments_lyr", "optimized_route_segments_dissolved_tmp",
                              ["NET_SOURCE_NAME", "NET_SOURCE_OID", "ARTIFICIAL"],
                              [['COMMODITY_FLOW', 'SUM']], "SINGLE_PART", "DISSOLVE_LINES")

    # Second dissolve needed to accurately show aggregate pipeline flows
    arcpy.FeatureToLine_management("optimized_route_segments_dissolved_tmp", "optimized_route_segments_split_tmp")

    arcpy.AddGeometryAttributes_management("optimized_route_segments_split_tmp", "LINE_START_MID_END")

    arcpy.Dissolve_management("optimized_route_segments_split_tmp", "optimized_route_segments_dissolved_tmp2",
                              ["NET_SOURCE_NAME", "Shape_Length", "MID_X", "MID_Y", "ARTIFICIAL"],
                              [["SUM_COMMODITY_FLOW", "SUM"]], "SINGLE_PART", "DISSOLVE_LINES")

    arcpy.AddField_management(in_table="optimized_route_segments_dissolved_tmp2", field_name="SUM_COMMODITY_FLOW",
                              field_type="DOUBLE", field_precision="", field_scale="", field_length="", field_alias="",
                              field_is_nullable="NULLABLE", field_is_required="NON_REQUIRED", field_domain="")
    arcpy.CalculateField_management(in_table="optimized_route_segments_dissolved_tmp2", field="SUM_COMMODITY_FLOW",
                                    expression="!SUM_SUM_COMMODITY_FLOW!", expression_type="PYTHON_9.3", code_block="")
    arcpy.DeleteField_management(in_table="optimized_route_segments_dissolved_tmp2",
                                 drop_field="SUM_SUM_COMMODITY_FLOW")
    arcpy.DeleteField_management(in_table="optimized_route_segments_dissolved_tmp2", drop_field="MID_X")
    arcpy.DeleteField_management(in_table="optimized_route_segments_dissolved_tmp2", drop_field="MID_Y")

    # Sort for mapping order
    arcpy.AddField_management(in_table="optimized_route_segments_dissolved_tmp2", field_name="SORT_FIELD",
                              field_type="SHORT")
    arcpy.MakeFeatureLayer_management("optimized_route_segments_dissolved_tmp2", "dissolved_segments_lyr")
    arcpy.SelectLayerByAttribute_management(in_layer_or_view="dissolved_segments_lyr", selection_type="NEW_SELECTION",
                                            where_clause="NET_SOURCE_NAME = 'road'")
    arcpy.CalculateField_management(in_table="dissolved_segments_lyr", field="SORT_FIELD",
                                    expression=1, expression_type="PYTHON_9.3")
    arcpy.SelectLayerByAttribute_management(in_layer_or_view="dissolved_segments_lyr", selection_type="NEW_SELECTION",
                                            where_clause="NET_SOURCE_NAME = 'rail'")
    arcpy.CalculateField_management(in_table="dissolved_segments_lyr", field="SORT_FIELD",
                                    expression=2, expression_type="PYTHON_9.3")
    arcpy.SelectLayerByAttribute_management(in_layer_or_view="dissolved_segments_lyr", selection_type="NEW_SELECTION",
                                            where_clause="NET_SOURCE_NAME = 'water'")
    arcpy.CalculateField_management(in_table="dissolved_segments_lyr", field="SORT_FIELD",
                                    expression=3, expression_type="PYTHON_9.3")
    arcpy.SelectLayerByAttribute_management(in_layer_or_view="dissolved_segments_lyr", selection_type="NEW_SELECTION",
                                            where_clause="NET_SOURCE_NAME LIKE 'pipeline%'")
    arcpy.CalculateField_management(in_table="dissolved_segments_lyr", field="SORT_FIELD",
                                    expression=4, expression_type="PYTHON_9.3")

    arcpy.Sort_management("optimized_route_segments_dissolved_tmp2", "optimized_route_segments_dissolved_commodity",
                          [["SORT_FIELD", "ASCENDING"]])

    # Delete temp fc's
    arcpy.Delete_management("optimized_route_segments_dissolved_tmp")
    arcpy.Delete_management("optimized_route_segments_split_tmp")
    arcpy.Delete_management("optimized_route_segments_dissolved_tmp2")
    arcpy.Delete_management("optimized_route_segments_lyr")
    arcpy.Delete_management("dissolved_segments_lyr")

    # Copy to permanent fc (unique to commodity name)
    arcpy.CopyFeatures_management("optimized_route_segments_dissolved_commodity",
                                  "optimized_route_segments_dissolved_" + layer_name)
コード例 #27
0
    def deleteLayer(self, layerpath):
        try:
            arcpy.Delete_management(layerpath)
            return True
        except Exception as e:
            print e.message
            return False
コード例 #28
0
# Import required modules
import arcpy
from arcpy.sa import *
arcpy.CheckOutExtension("Spatial")
import os  # provides access to operating system functionality such as file and directory paths
import sys  # provides access to Python system functions
import traceback  # used for error handling
from datetime import datetime  # for time-stamping

# set in folder
root = r'C:/Users/mxhensch/GIS_data/ForestModeling/'
inDir = os.path.join(root, "ADK/ADKSolRad/")
arcpy.env.workspace = inDir
# set out folder
outDir = inDir  #os.path.join(root,"ADK/FINAL")
arcpy.Delete_management(
    "in_memory")  #Clear in_memory to avoid schema lock errors

#Set env
#WGS = arcpy.SpatialReference("WGS_1984.prj")
#UTM18 = outDir+"NAD_1983_UTM_Zone_18N.prj"
#clip = os.path.join(root+"ADK_buffer.shp")
#arcpy.env.extent = clip
arcpy.env.overwriteOutput = True

##get master raster
#SenMaster = os.path.join(outDir,"FINAL/0730_B02.tif")
##arcpy.env.outputCoordinateSystem = WGS
#arcpy.env.snapRaster = SenMaster
#resX = arcpy.GetRasterProperties_management(SenMaster, "CELLSIZEX")
#resY = arcpy.GetRasterProperties_management(SenMaster, "CELLSIZEY")
#if resX.getOutput(0) == resY.getOutput(0):
コード例 #29
0
#output
outStreet = os.path.join(workspace, outStreetName)

#Start

try:
    # Remove Highways and Ramps
    whereclause1 = '''NOT "CLASS" = 'H' AND NOT "CLASS" = 'RAMP' '''  # Note Centerline file did not have HWY
    arcpy.AddMessage("Where Clause: " + whereclause1)
    allstreetlyr = arcpy.MakeFeatureLayer_management(inStreet, "allstreetlyr",
                                                     whereclause1)

    #Clean up in case there are any hanging files...
    if arcpy.Exists("SplitLine"):
        arcpy.Delete_management("SplitLine")

    #Split Lines until all roadway segments are less than 530 feet.
    count = 1
    i = 0
    RndptD = 250  #Used to split segments

    SptLn_start_time = time.time()
    arcpy.AddMessage("Start Split Line Process at: %s minutes ---" % (round(
        (time.time() - start_time) / 60, 2)))
    while count > 0:
        i += 1
        #Get Count of Features Greater than 530 feet
        arcpy.AddMessage(str(i) + " : Split segments >= 530 feet")
        whereclause2 = "shape_length >= 530"
        if not arcpy.Exists("SplitLine"):
コード例 #30
0
def update_route_system(header,
                        itin,
                        vertices_comprising,
                        split_dict_ABB,
                        new_ABB_values,
                        common_id_field,
                        order_field=None):
    ''' A method for updating any of the MHN's route systems: hwyproj,
        bus_base, bus_current, and bus_future. order_field argument allows for
        separate treatment of hwyproj and the bus routes. '''

    # Copy itinerary table to memory for non-destructive editing
    header_name = MHN.break_path(header)['name']
    itin_name = MHN.break_path(itin)['name']
    arcpy.AddMessage('-- ' + header_name + '...')
    itin_copy_path = MHN.mem
    itin_copy_name = itin_name + '_copy'
    itin_copy = os.path.join(itin_copy_path, itin_copy_name)
    arcpy.CreateTable_management(itin_copy_path, itin_copy_name, itin)

    itin_OID_field = MHN.determine_OID_fieldname(itin)
    itin_dict = MHN.make_attribute_dict(itin, itin_OID_field)

    # Check validity of ABB value on each line, adjusting the itinerary when
    # invalidity is due to a split
    max_itin_OID = max([OID for OID in itin_dict])
    split_itin_dict = {}
    all_itin_OIDs = list(itin_dict.keys())
    all_itin_OIDs.sort(
    )  # For processing in itinerary order, rather than in the dict's pseudo-random order
    bad_itin_OIDs = []
    if order_field:
        order_bump = 0
    for OID in all_itin_OIDs:
        common_id = itin_dict[OID][common_id_field]
        if order_field:
            order = itin_dict[OID][order_field]
            if order == 1:
                order_bump = 0
        ABB = itin_dict[OID]['ABB']
        if ABB is not None:
            anode = int(ABB.split('-')[0])
            bnode = int(ABB.split('-')[1])
            baselink = int(ABB.split('-')[2])
        else:
            anode = 0
            bnode = 0
            baselink = 0
        if ABB not in new_ABB_values:
            if not order_field:  # For hwyproj, all deleted links should be removed from coding. Split links will be replaced.
                bad_itin_OIDs.append(OID)
            if (
                    anode, bnode, baselink
            ) in split_dict_ABB:  # If ABB is invalid because it was split, find new ABB values
                ordered_segments = split_dict_ABB[(anode, bnode, baselink)]
                if order_field:
                    bad_itin_OIDs.append(
                        OID
                    )  # For bus routes, only split links should be removed (and replaced).
                    itin_a = itin_dict[OID]['ITIN_A']
                    itin_b = itin_dict[OID]['ITIN_B']
                    if itin_b == anode or itin_a == bnode:
                        backwards = True
                        ordered_segments = ordered_segments[::
                                                            -1]  # Make a reversed copy of the ordered segments
                    else:
                        backwards = False
                for split_ABB in ordered_segments:
                    split_anode = int(split_ABB[0].split('-')[0])
                    split_bnode = int(split_ABB[0].split('-')[1])
                    split_baselink = int(split_ABB[0].split('-')[2])
                    split_length_ratio = split_ABB[3]
                    max_itin_OID += 1
                    split_itin_dict[max_itin_OID] = itin_dict[OID].copy()
                    split_itin_dict[max_itin_OID]['ABB'] = split_ABB[0]

                    if order_field:
                        if backwards:
                            split_itin_a = split_bnode
                            split_itin_b = split_anode
                            split_start_ratio = 1 - (split_ABB[2] +
                                                     split_length_ratio)
                        else:
                            split_itin_a = split_anode
                            split_itin_b = split_bnode
                            split_start_ratio = split_ABB[2]

                        # Adjust itinerary nodes and order:
                        split_itin_dict[max_itin_OID]['ITIN_A'] = split_itin_a
                        split_itin_dict[max_itin_OID]['ITIN_B'] = split_itin_b
                        if split_itin_a != itin_a:  # First split segment receives the same order as the original
                            order_bump += 1
                        split_itin_dict[max_itin_OID][
                            order_field] += order_bump

                        # Adjust variables that only apply to original link's itin_b:
                        if split_itin_dict[max_itin_OID][
                                'LAYOVER'] > 0 and split_itin_b != itin_b:
                            split_itin_dict[max_itin_OID]['LAYOVER'] = 0

                        # Apportion length-dependent variables:
                        split_itin_dict[max_itin_OID][
                            'LINE_SERV_TIME'] *= split_length_ratio
                        F_MEAS = split_itin_dict[max_itin_OID]['F_MEAS']
                        T_MEAS = split_itin_dict[max_itin_OID]['T_MEAS']
                        meas_diff = T_MEAS - F_MEAS
                        if header_name == 'bus_future':
                            future = True
                        else:
                            future = False
                        if not future:  # bus_future has no DEP_TIME or ARR_TIME
                            DEP_TIME = split_itin_dict[max_itin_OID][
                                'DEP_TIME']
                            ARR_TIME = split_itin_dict[max_itin_OID][
                                'ARR_TIME']
                            time_diff = ARR_TIME - DEP_TIME
                        if split_itin_a != itin_a:
                            split_itin_dict[max_itin_OID][
                                'F_MEAS'] += meas_diff * split_start_ratio
                            if not future:
                                split_itin_dict[max_itin_OID][
                                    'DEP_TIME'] += time_diff * split_start_ratio
                        else:
                            pass  # F_MEAS & DEP_TIME are already correct for itin_a
                        if split_itin_b != itin_b:
                            split_itin_dict[max_itin_OID][
                                'T_MEAS'] = F_MEAS + meas_diff * (
                                    split_start_ratio + split_length_ratio)
                            if not future:
                                split_itin_dict[max_itin_OID][
                                    'ARR_TIME'] = DEP_TIME + time_diff * (
                                        split_start_ratio + split_length_ratio)
                        else:
                            pass  # T_MEAS & ARR_TIME are already correct for itin_b
        else:
            if order_field:
                itin_dict[OID][order_field] += order_bump

    for OID in bad_itin_OIDs:
        del itin_dict[
            OID]  # Remove invalid ABB records after accounting for splits

    # Combine the itinerary dicts, adjust ITIN_ORDER, report new gaps, and
    # write the updated records to a table in memory.
    itin_dict.update(split_itin_dict)
    itin_fields = [
        field.name for field in arcpy.ListFields(itin_copy)
        if field.type != 'OID'
    ]
    with arcpy.da.InsertCursor(itin_copy, itin_fields) as coding_cursor:
        for OID in itin_dict:
            coding_cursor.insertRow(
                [itin_dict[OID][field] for field in itin_fields])

    # Sort records into a second table in memory.
    itin_updated = os.path.join(MHN.mem,
                                '{0}_itin_updated'.format(header_name))
    if order_field:
        arcpy.Sort_management(
            itin_copy, itin_updated,
            [[common_id_field, 'ASCENDING'], [order_field, 'ASCENDING']])
    else:
        arcpy.Sort_management(itin_copy, itin_updated,
                              [[common_id_field, 'ASCENDING']])
    arcpy.Delete_management(itin_copy)

    # Re-build line features.
    header_updated_path = MHN.mem
    header_updated_name = '{0}_updated'.format(header_name)
    header_updated = os.path.join(header_updated_path, header_updated_name)
    arcs_traversed_by = {}
    field_list = ['ABB', common_id_field]
    with arcpy.da.SearchCursor(itin_updated, field_list) as itin_cursor:
        for row in itin_cursor:
            abb = row[0]
            common_id = row[1]
            if common_id in arcs_traversed_by:
                arcs_traversed_by[common_id].append(abb)
            else:
                arcs_traversed_by[common_id] = [abb]

    common_id_list = [
        row[0] for row in arcpy.da.SearchCursor(header, [common_id_field])
    ]
    arcpy.CreateFeatureclass_management(header_updated_path,
                                        header_updated_name, 'POLYLINE',
                                        header)
    with arcpy.da.InsertCursor(header_updated,
                               ['SHAPE@', common_id_field]) as routes_cursor:
        for common_id in common_id_list:
            route_vertices = arcpy.Array([
                vertices_comprising[abb]
                for abb in arcs_traversed_by[common_id]
                if abb in vertices_comprising
            ])
            try:
                route = arcpy.Polyline(route_vertices)
                routes_cursor.insertRow([route, common_id])
            except:
                itin_delete_query = ''' "{0}" = '{1}' '''.format(
                    common_id_field, common_id)
                with arcpy.da.UpdateCursor(
                        itin_updated, ['OID@'],
                        itin_delete_query) as itin_delete_cursor:
                    for row in itin_delete_cursor:
                        itin_delete_cursor.deleteRow()
                arcpy.AddWarning(
                    '   - {0} = {1} cannot be rebuilt because the arcs comprising '
                    'it no longer exist (or have new ABB values), so it is being '
                    'deleted. Please re-import it if necessary.'.format(
                        common_id_field, common_id))

    # Append the header file attribute values from a search cursor of the original.
    attributes = MHN.make_attribute_dict(header, common_id_field)
    update_fields = [
        field.name for field in arcpy.ListFields(header)
        if field.type not in ['OID', 'Geometry']
        and field.name.upper() != 'SHAPE_LENGTH'
    ]
    with arcpy.da.UpdateCursor(header_updated,
                               update_fields) as attribute_cursor:
        for row in attribute_cursor:
            common_id = row[update_fields.index(common_id_field)]
            for field in [
                    field for field in update_fields
                    if field != common_id_field
            ]:
                row[update_fields.index(field)] = attributes[common_id][field]
            attribute_cursor.updateRow(row)

    return ((header, header_updated), (itin, itin_updated))
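
# A minimal usage sketch for update_route_system; the paths, ID field name and
# the three lookup structures are placeholders here -- in the MHN scripts they
# are built earlier in the same module.
hwyproj = r'C:\MHN\mhn.gdb\hwyproj'
hwyproj_coding = r'C:\MHN\mhn.gdb\hwyproj_coding'
(header_fcs, itin_tables) = update_route_system(
    hwyproj, hwyproj_coding,
    vertices_comprising,   # vertex arrays keyed by ABB, built upstream
    split_dict_ABB,        # {(anode, bnode, baselink): ordered split segments}
    new_ABB_values,        # collection of currently valid ABB strings
    'COMMON_ID')           # order_field omitted for hwyproj-style coding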