Example #1
def sort_shpfilebyidfield(inputshpfile, outputshpfile, intfield, desc=False):
    if desc:
        arcpy.Sort_management(inputshpfile, outputshpfile,
                              [[intfield, "DESCENDING"]])
    else:
        arcpy.Sort_management(inputshpfile, outputshpfile,
                              [[intfield, "ASCENDING"]])
def GemstoneInProvince():
    # input shapefile containing all gemstone feature classes (the layer resulting from a spatial join with the
    # Province boundary layer)
    in_fc = r"C:\Gemstone-Geology maps\all_gemstones\join\gemstone_provinces.shp"
    # output shapefile for Sort_management to write
    fc = r"C:\Gemstone-Geology maps\all_gemstones\join\gemstone_provinces_sorted.shp"
    # Sort gemstone names alphabetically so that the final list is grouped by gemstone
    print("Sorting master gemstone-province feature class in ascending order")
    arcpy.Sort_management(in_fc, fc, [["Commodity1", "ASCENDING"]])
    # SearchCursor reads through the gemstone and province name fields
    fields = ["Commodity1", "ADM1_EN"]
    gem_loc = []

    print("Searching the feature class attribute table for unique combinations of gemstones in provinces")
    with arcpy.da.SearchCursor(fc, fields) as cursor:
        for row in cursor:
            if row not in gem_loc:  # skip if this gemstone is already recorded for this province
                gem_loc.append(row)
    # print gemstone-province pairs as strings
    # for i in gem_loc:
    #     print(' ... '.join(i))
    print("Finished compiling list of unique gemstone-province combinations")
    print("***********************************")
    return gem_loc[:]
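For context, a minimal usage sketch of the two helpers above; the shapefile paths and the field name are hypothetical placeholders, and arcpy is assumed to be imported already.

# Hypothetical usage of the helpers above (paths and field name are placeholders).
import arcpy

arcpy.env.overwriteOutput = True
sort_shpfilebyidfield(r"C:\data\wells.shp", r"C:\data\wells_sorted.shp",
                      "WELL_ID", desc=True)
for gem, province in GemstoneInProvince():
    print("{} ... {}".format(gem, province))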
def summarizeGumByCategory():

    layer = "in_memory\gumcount_0plus"
    #table = "GumCountByCategory"
    table = "in_memory/CityGumCountByCategory"
    #table = publishGDB + "/CityGumCountByCategory"
    #arcpy.Delete_management(layer)

    arcpy.MakeFeatureLayer_management("Sites", "lyr")
    arcpy.SelectLayerByAttribute_management("lyr", "NEW_SELECTION",
                                            "GUM_COUNT_ACTUAL > 0")
    arcpy.CopyFeatures_management("lyr", layer)

    #arcpy.Statistics_analysis(layer, "GumCountByCategory", "GUM_COUNT_ACTUAL SUM", "SIC_NAME")
    arcpy.Statistics_analysis(layer, table, "GUM_COUNT_ACTUAL SUM", "SIC_NAME")
    arcpy.AddField_management(table, "AVG_GUM_COUNT", "SHORT")

    fields = ['SIC_NAME', 'FREQUENCY', 'SUM_GUM_COUNT_ACTUAL', 'AVG_GUM_COUNT']

    with arcpy.da.UpdateCursor(table, fields) as cursor:
        for row in cursor:
            print(row)
            if row[0] is None:
                row[0] = "ADDRESS POINT"
            row[3] = round(row[2] / row[1])
            cursor.updateRow(row)

    sort_fields = [["SUM_GUM_COUNT_ACTUAL", "DESCENDING"]]

    sortTable = publishGDB + "/CityGumCountByCategory"
    arcpy.Sort_management(table, sortTable, sort_fields)
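summarizeGumByCategory() references a "Sites" feature class in the current workspace and a publishGDB variable defined elsewhere; a minimal sketch of that setup, with placeholder paths (an assumption, not the original configuration), might look like this.

# Hypothetical setup that summarizeGumByCategory() appears to rely on.
import arcpy

arcpy.env.workspace = r"C:\data\GumSurvey.gdb"   # placeholder workspace containing "Sites"
publishGDB = r"C:\data\Publish.gdb"              # placeholder output geodatabase
arcpy.env.overwriteOutput = True

summarizeGumByCategory()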
Example #4
def sortToCameraRange(roughAOI,AOI_output):
    # establish new sorting, numbering, and area fields
    arcpy.AddField_management(roughAOI, "YMin", "DOUBLE", "20","12")
    arcpy.AddField_management(roughAOI, "AOI", "SHORT")
    arcpy.AddField_management(roughAOI,"SqrMeters","Double")

    # compile list of minimum Y bounds of each polygon (proximal to PHENOCAM-North)
    yMinExtents = []
    readFeatures = arcpy.SearchCursor(roughAOI)
    changeFeatures = arcpy.UpdateCursor(roughAOI)
    featureNames = arcpy.Describe(roughAOI).shapeFieldName
    for readFeat in readFeatures:
        yMinExtents.append(readFeat.getValue('Shape').extent.YMin)

    # apply respective minimum Y bounds to each features new attribute field
    for idx, changeFeat in enumerate(changeFeatures):
        changeFeat.setValue("yMin",yMinExtents[idx])
        changeFeatures.updateRow(changeFeat)

    # resort the attribute table based on the newly filled YMin attribute field
    arcpy.env.addOutputsToMap = True
    arcpy.Sort_management(roughAOI, AOI_output, [["YMin","ASCENDING"]], "")
    arcpy.Delete_management(roughAOI)

    # create top-down AOI numerical index (thnx to YMin-sort), and update AOI field
    changeFeatures = arcpy.UpdateCursor(AOI_output)
    for idx, changeFeat in enumerate(changeFeatures):
        aoiIdx = idx+1
        changeFeat.setValue("AOI",aoiIdx)
        changeFeatures.updateRow(changeFeat)
def OrdenarManzanasFalsoCod(self):
    if self.uso_falso_cod == 1:
        manzanas_ordenadas = arcpy.Sort_management(
            self.tb_manzanas, self.tb_manzanas_ordenadas,
            ["UBIGEO", "ZONA", "FALSO_COD"])
    else:
        manzanas_ordenadas = arcpy.Sort_management(
            self.tb_manzanas, self.tb_manzanas_ordenadas,
            ["UBIGEO", "ZONA", "MANZANA"])

    expression = "flg_manzana(!CANT_VIV!)"
    codeblock = """def flg_manzana(CANT_VIV):\n  if (CANT_VIV>18):\n    return 1\n  else:\n    return 0"""
    #.format(self.cant_viv_techo)
    arcpy.AddField_management(manzanas_ordenadas, "FLG_MZ", "SHORT")
    arcpy.CalculateField_management(manzanas_ordenadas, "FLG_MZ",
                                    expression, "PYTHON_9.3", codeblock)
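The codeblock above packs the flag function into one line with \n escapes; an equivalent, easier-to-read form of the same logic (threshold of 18 dwellings) could be written as a plain multi-line string.

# Equivalent CalculateField code block, written out as a multi-line string.
codeblock = """
def flg_manzana(CANT_VIV):
    if CANT_VIV > 18:
        return 1
    return 0
"""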
Example #6
def get_footprints(centroids, shapefile):
    os.makedirs(resources.temp_footprints, exist_ok=True)
    output_feature_class = os.path.join(resources.temp_footprints,
                                        "unsorted.shp")
    join_operation = "JOIN_ONE_TO_MANY"
    join_type = "KEEP_COMMON"
    match_option = "CONTAINS"
    arcpy.SpatialJoin_analysis(shapefile,
                               centroids,
                               output_feature_class,
                               join_operation=join_operation,
                               join_type=join_type,
                               match_option=match_option)
    fields = [
        field.name for field in arcpy.ListFields(output_feature_class)
    ]
    valids = [
        "FID", "Shape", "Roof", "Score", "Perc_000", "Perc_010",
        "Perc_050", "Perc_090", "Perc_100"
    ]
    to_drop = [item for item in fields if item not in valids]
    arcpy.DeleteField_management(output_feature_class, to_drop)
    output = os.path.join(resources.temp_footprints, "footprints.shp")
    arcpy.Sort_management(output_feature_class, output, "Shape ASCENDING",
                          "UL")
    arcpy.Delete_management(output_feature_class)
    arcpy.DeleteIdentical_management(output, "Shape", None, 0)
    return output
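get_footprints() depends on a resources module (for resources.temp_footprints) that is not shown; a hedged usage sketch with placeholder inputs follows.

# Hypothetical call; the input shapefiles are placeholders, and the resources module is assumed to exist.
import os
import arcpy

centroids = r"C:\data\roof_centroids.shp"
buildings = r"C:\data\building_polygons.shp"
footprints = get_footprints(centroids, buildings)
print("Sorted, de-duplicated footprints written to", footprints)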
def generate_top_ranked_subset(in_feature_class, ranking_attribute,
                               out_feature_class):
    # Sort the feature class
    arcpy.Sort_management(in_feature_class, out_feature_class,
                          [[ranking_attribute, "DESCENDING"]])
    # Add a rank attribute and populate it
    arcpy.AddField_management(out_feature_class, "Rank", "LONG")
    arcpy.CalculateField_management(out_feature_class, "Rank", "!OBJECTID!",
                                    "PYTHON_9.3")

    # Create a new feature class with the top third rows
    max_rank = get_max(out_feature_class, "Rank")
    one_third_rows = max_rank / 3
    #print(one_third_rows)
    arcpy.SelectLayerByAttribute_management(out_feature_class, "NEW_SELECTION",
                                            "Rank <= " + str(one_third_rows))
    arcpy.CopyFeatures_management(out_feature_class,
                                  out_feature_class + "_top10pct")
    arcpy.SelectLayerByAttribute_management(out_feature_class,
                                            "CLEAR_SELECTION")

    #  Create a new feature class with the top 20 rows
    arcpy.SelectLayerByAttribute_management(out_feature_class, "NEW_SELECTION",
                                            "Rank <= 20")
    arcpy.CopyFeatures_management(out_feature_class,
                                  out_feature_class + "_top20")
    arcpy.SelectLayerByAttribute_management(out_feature_class,
                                            "CLEAR_SELECTION")
Example #8
def importar_tablas_trabajo(data, campos):
    arcpy.env.overwriteOutput = True

    db = 'CPV_SEGMENTACION_GDB'
    ip = '172.18.1.93'
    usuario = 'sde'
    password = '******'
    path_conexion = conx.conexion_arcgis(db, ip, usuario, password)
    arcpy.env.workspace = path_conexion

    temp_ubigeos = ""
    i = 0
    for x in data:
        i = i + 1
        if (i == 1):
            temp_ubigeos = "'{}'".format(x[0])
        else:
            temp_ubigeos = "{},'{}'".format(temp_ubigeos, x[0])

    if len(data) > 0:
        sql = expresion.Expresion_2(data, campos)
    else:
        sql = ' FLAG_NUEVO=1'

    list_capas = [
        ["{}.sde.VW_FRENTES".format(db), tb_frentes_temp, 1],
    ]

    for i, capa in enumerate(list_capas):
        if capa[2] == 1:

            print "select * from {} where {} ".format(capa[0], sql)
            x = arcpy.MakeQueryLayer_management(
                path_conexion, 'capa{}'.format(i),
                "select * from {} where {} ".format(capa[0], sql))

        else:
            x = arcpy.MakeQueryTable_management(capa[0], "capa{}".format(i),
                                                "USE_KEY_FIELDS", "objectid",
                                                "", sql)

        if capa[1] in [tb_frentes_temp]:
            temp = arcpy.CopyFeatures_management(x,
                                                 'in_memory/temp_{}'.format(i))
            arcpy.AddField_management(temp, 'MANZANA2', 'TEXT', field_length=50)
            arcpy.CalculateField_management(temp, 'MANZANA2', '!MANZANA!',
                                            "PYTHON_9.3")
            arcpy.DeleteField_management(temp, ['MANZANA'])
            arcpy.CopyFeatures_management(temp, capa[1])
            arcpy.AddField_management(capa[1], 'MANZANA', 'TEXT', field_length=50)
            arcpy.CalculateField_management(capa[1], 'MANZANA', '!MANZANA2!',
                                            "PYTHON_9.3")
            arcpy.DeleteField_management(capa[1], ['MANZANA2'])
        else:
            arcpy.CopyFeatures_management(x, capa[1])

    arcpy.Sort_management(tb_frentes_temp, tb_frentes,
                          ['UBIGEO', 'ZONA', 'MANZANA', 'FRENTE_ORD'])
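The loop near the top of importar_tablas_trabajo() builds temp_ubigeos into a quoted, comma-separated list (e.g. for a SQL IN clause); the same string can be built more compactly, as in this sketch.

# Equivalent construction of the quoted UBIGEO list, e.g. "'010101','010102'"
temp_ubigeos = ",".join("'{}'".format(x[0]) for x in data)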
Example #9
    def execute(self):
        # Create output GDB and add domain for asset type
        self.gdb = arcpy.CreateFileGDB_management(*os.path.split(self.gdb))[0]

        # Since NA.solve uses OID for start/end points, the OID of the feature classes being copied should
        # be maintained so that the attribute information can be linked back, if desired
        remote_temp = copy_features(self.remote,
                                    common.unique_name("in_memory/remoteTemp"))
        fixed_temp = copy_features(self.fixed,
                                   common.unique_name("in_memory/fixedTemp"))

        # The information that Near creates is not needed (distance to nearest feature and feature ID), so delete it.
        # Sorting the features by descending distance ensures that the furthest assets are backhauled first.
        arcpy.Near_analysis(remote_temp, fixed_temp)
        sort_temp = arcpy.Sort_management(remote_temp,
                                          common.unique_name("in_memory/sort"),
                                          [["NEAR_DIST", "DESCENDING"]])[0]
        common.delete(remote_temp)
        arcpy.DeleteField_management(sort_temp, ("NEAR_DIST", "NEAR_FID"))

        # For each remote asset, find the surrounding nearest fixed assets. This Near Table will be used
        # during backhaul to only load particular assets into the Facilities sublayer. Because this table is sorted
        # where the nearest features are first (NEAR_DIST and NEAR_RANK are ascending), an arbitrarily defined number of
        # fixed assets can be loaded into the Closest Facility via list slicing.

        # In practice, set closest_count to a reasonable number (such as 100). If closest_count=None, then the resultant
        # Near Table will have (nrows in remote * nrows in fixed) rows. This quickly gets very large (17m+ during dev).
        count = arcpy.GetCount_management(sort_temp)
        arcpy.AddMessage("\t{} Processing {} features...".format(
            common.timestamp(), count))
        method = "GEODESIC" if arcpy.Describe(
            sort_temp).spatialReference.type == "Geographic" else "PLANAR"
        near_temp = arcpy.GenerateNearTable_analysis(
            sort_temp,
            fixed_temp,
            common.unique_name("in_memory/near"),
            closest="ALL",
            closest_count=NEAR_TABLE_SIZE,
            method=method)[0]
        arcpy.AddMessage("\t{} Saving Near Table...".format(
            common.timestamp()))
        arcpy.DeleteField_management(near_temp, ["NEAR_DIST", "NEAR_RANK"])
        near_out = arcpy.CopyRows_management(
            near_temp, os.path.join(self.gdb, NEAR_TABLE))[0]
        common.delete(near_temp)

        arcpy.ResetProgressor()
        self.calc_update(sort_temp, "Remote")
        self.calc_update(fixed_temp, "Fixed")
        remote_out = arcpy.CopyFeatures_management(
            sort_temp, os.path.join(self.gdb, REMOTE))[0]
        fixed_out = arcpy.CopyFeatures_management(
            fixed_temp, os.path.join(self.gdb, FIXED))[0]
        common.delete([sort_temp, fixed_temp])

        return remote_out, fixed_out, near_out
def calculoSortGeneral(
    capaObjetoC
):  # sorts the cluster object table in ascending order by cluster ID and by distance to the irradiation point
    arcpy.AddMessage(time.strftime("%c") + " " + "Ejecutando Calculo Sort...")
    capaOrdenada = "in_memory" + "\\capaClusterOrdenada"
    aa = arcpy.Sort_management(capaObjetoC, capaOrdenada,
                               'NEAR_FID ASCENDING;NEAR_DIST ASCENDING',
                               'UR').getOutput(0)
    arcpy.AddMessage("Finaliza Calculo Sort")
    return capaOrdenada
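calculoSortGeneral() passes the sort fields as a single semicolon-delimited string; Sort_management also accepts a list of [field, direction] pairs, so the same call could be written as in this sketch.

# Equivalent sort specification using the list form of sort_field.
aa = arcpy.Sort_management(capaObjetoC, capaOrdenada,
                           [["NEAR_FID", "ASCENDING"],
                            ["NEAR_DIST", "ASCENDING"]],
                           "UR").getOutput(0)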
def ordenar_manzanas_cod_falso(where_expression):
    manzanas_selecc = arcpy.Select_analysis(tb_manzanas,
                                            "in_memory//manzanas_selecc",
                                            where_expression)
    manzanas_ordenadas = arcpy.Sort_management(manzanas_selecc,
                                               tb_manzanas_ordenadas,
                                               ["UBIGEO", "ZONA", "FALSO_COD"])
    expression = "flg_manzana(!VIV_MZ!)"

    arcpy.AddField_management(manzanas_ordenadas, "FLG_MZ", "SHORT")
Example #12
def generate_ranked_subset(in_feature_class, ranking_attribute, out_feature_class):
    # Sort the attribute table
    arcpy.Sort_management(in_feature_class, out_feature_class, [[ranking_attribute, "DESCENDING"]])
    # Add a Rank attribute and populate it
    arcpy.AddField_management(out_feature_class, "Rank", "LONG")
    arcpy.CalculateField_management(out_feature_class, "Rank", "!OBJECTID!", "PYTHON_9.3")
    # Also generate a separate feature class with the top 20 values
    arcpy.SelectLayerByAttribute_management(out_feature_class, "NEW_SELECTION", "Rank <= 20")
    arcpy.CopyFeatures_management(out_feature_class, out_feature_class + "_Top20" )
    arcpy.SelectLayerByAttribute_management(out_feature_class, "CLEAR_SELECTION")
def makeNodeName2ArcsDict(caf, fields):
    # takes arc fc, list of fields (e.g. [Left_MapUnit, Right_MapUnit] )
    #   makes to and from node fcs, concatenates, sorts, finds arc-ends
    #   that share common XY values
    # returns dictionary of arcs at each node, keyed to nodename
    #    i.e., dict[nodename] = [[arc1 fields], [arc2 fields],...]
    fv1 = os.path.dirname(caf) + '/xxxNfv1'
    fv2 = os.path.dirname(caf) + '/xxxNfv12'

    lenFields = len(fields)
    addMsgAndPrint('  making endpoint feature classes')
    testAndDelete(fv1)
    arcpy.FeatureVerticesToPoints_management(caf, fv1, 'BOTH_ENDS')
    addMsgAndPrint('  adding XY values and sorting by XY')
    arcpy.AddXY_management(fv1)
    testAndDelete(fv2)
    arcpy.Sort_management(fv1, fv2,
                          [["POINT_X", "ASCENDING"], ["POINT_Y", "ASCENDING"]])
    ##fix fields statement and following field references
    fields.append('SHAPE@XY')
    indexShape = len(fields) - 1

    isFirst = True
    nArcs = 0
    nodeList = []
    with arcpy.da.SearchCursor(fv2, fields) as cursor:
        for row in cursor:
            x = row[indexShape][0]
            y = row[indexShape][1]
            arcFields = row[0:lenFields]
            if isFirst:
                isFirst = False
                lastX = x
                lastY = y
                arcs = [arcFields]
            elif abs(x - lastX) < searchRadius and abs(y -
                                                       lastY) < searchRadius:
                arcs.append(arcFields)
            else:
                nodeList.append([nodeName(lastX, lastY), arcs])
                lastX = x
                lastY = y
                arcs = [arcFields]
        nodeList.append([nodeName(lastX, lastY), arcs])

    addMsgAndPrint('  ' + str(len(nodeList)) + ' distinct nodes')
    addMsgAndPrint('  cleaning up')
    for fc in fv1, fv2:
        testAndDelete(fc)

    nodeDict = {}
    for n in nodeList:
        nodeDict[n[0]] = n[1]
    return nodeDict
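makeNodeName2ArcsDict() relies on addMsgAndPrint, testAndDelete, nodeName, and a searchRadius constant defined elsewhere in the original module; the following are hedged, hypothetical stand-ins sketched only so the snippet can be read in isolation.

# Hypothetical stand-ins for the module-level helpers the function assumes.
searchRadius = 0.01  # XY tolerance in map units; placeholder value

def addMsgAndPrint(msg):
    print(msg)
    arcpy.AddMessage(msg)

def testAndDelete(fc):
    if arcpy.Exists(fc):
        arcpy.Delete_management(fc)

def nodeName(x, y):
    # key a node by its rounded coordinates
    return "{:.2f}_{:.2f}".format(x, y)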
def copyBackupPublishGDBtoStaging():

    arcpy.Copy_management(backupPublishGDB, publishGDB)

    sort_fields = [["GUM_COUNT_DATE", "DESCENDING"]]

    table = publishGDB + "/UnsortedWorldwideGumCountByDate"
    sortTable = publishGDB + "/WorldwideGumCountByDate"

    arcpy.Sort_management(table, sortTable, sort_fields)

    arcpy.Delete_management(table)
def createMaxSpeed (featureclass):
    fcName = os.path.splitext(os.path.basename(featureclass))[0].partition("_")[2]
    arcpy.AddField_management (featureclass, "Down", "SHORT")
    arcpy.CalculateField_management (featureclass, "Down", "!MAXADDOWN!", "PYTHON")
    arcpy.Union_analysis (featureclass, "temp_union_" + fcName, "ALL")
    arcpy.Sort_management ("temp_union_" + fcName, "temp_union_sort" + fcName, [["Down", "DESCENDING"]])
    arcpy.AddField_management ("temp_union_sort" + fcName, "XCOORD", "DOUBLE")
    arcpy.AddField_management ("temp_union_sort" + fcName, "YCOORD", "DOUBLE")
    arcpy.CalculateField_management ("temp_union_sort" + fcName, "XCOORD", "!SHAPE.CENTROID!.split()[0]", "PYTHON")
    arcpy.CalculateField_management ("temp_union_sort" + fcName, "YCOORD", "!SHAPE.CENTROID!.split()[1]", "PYTHON")
    arcpy.Dissolve_management ("temp_union_sort" + fcName, "temp_union_sort_dz" + fcName, ["XCOORD", "YCOORD", "SHAPE_Area"], "Down FIRST", "MULTI_PART", "")
    arcpy.Dissolve_management ("temp_union_sort_dz" + fcName, "fc_mt_final_" + fcName + "_max_speed", ["FIRST_Down"], "", "MULTI_PART", "")
Example #16
def main(huc_poly, in_hydro, seg_length, outFGB):

    DeleteTF = "true"

    # clip stream lines to huc polygon boundaries.
    arcpy.AddMessage("Clipping the stream network to the HUC boundaries...")
    clip_hydro = arcpy.Clip_analysis(in_hydro, huc_poly,
                                     r"in_memory\clip_hydro")

    # segmentation of the polyline
    arcpy.AddMessage(
        "Using the SLEM script to segment the polyline feature...")
    SplitLine = dS.SLEM(clip_hydro, seg_length, r"in_memory\SplitLine",
                        DeleteTF)

    arcpy.AddMessage("Sorting the segmented line...")
    outSort = outFGB + r"\segments"
    arcpy.Sort_management(
        SplitLine, outSort,
        [["Rank_UGO", "ASCENDING"], ["Distance", "ASCENDING"]])

    arcpy.AddField_management(outSort, "Rank_DGO", "LONG", "", "", "", "",
                              "NULLABLE", "NON_REQUIRED")
    fieldname = [f.name for f in arcpy.ListFields(outSort)]
    arcpy.CalculateField_management(outSort, "Rank_DGO",
                                    "!" + str(fieldname[0]) + "!",
                                    "PYTHON_9.3")

    #delete temporary files
    arcpy.AddMessage("Deleting temporary files...")
    arcpy.Delete_management(SplitLine)

    # merges adjacent stream segments if one is less than threshold.
    arcpy.AddMessage("Cleaning line segments...")
    clusterTolerance = float(seg_length) * 0.25
    clean_stream = cS.cleanLineGeom(outSort, "Rank_UGO", "Rank_DGO",
                                    clusterTolerance)
    arcpy.AddField_management(clean_stream, "LineOID", "LONG", "", "", "", "",
                              "NULLABLE", "NON_REQUIRED")
    arcpy.CalculateField_management(clean_stream, "LineOID", '"!OBJECTID!"',
                                    "PYTHON_9.3")
    arcpy.DeleteField_management(clean_stream, "Rank_UGO")
    arcpy.DeleteField_management(clean_stream, "Rank_DGO")
    return clean_stream
Example #17
def segOptionA(in_hydro,
               seg_length,
               outFGB,
               outSegmentIDField="SegmentID",
               scratchWorkspace="in_memory"):
    """Segment the input stream network feature class using 'remainder at inflow of reach' method."""
    arcpy.AddMessage(
        "Segmenting process using the remainder at stream branch inflow method..."
    )
    DeleteTF = "true"

    # Segmentation of the polyline using module from Fluvial Corridors toolbox.
    splitLine = dS.SLEM(in_hydro, seg_length, scratchWorkspace + r"\splitLine",
                        DeleteTF)

    outSort = scratchWorkspace + r"\segments_sort"
    arcpy.Sort_management(
        splitLine, outSort,
        [["Rank_UGO", "ASCENDING"], ["Distance", "ASCENDING"]])

    arcpy.AddField_management(outSort, "Rank_DGO", "LONG", "", "", "", "",
                              "NULLABLE", "NON_REQUIRED")
    fieldname = [f.name for f in arcpy.ListFields(outSort)]
    arcpy.CalculateField_management(outSort, "Rank_DGO",
                                    "!" + str(fieldname[0]) + "!",
                                    "PYTHON_9.3")

    # Merges adjacent stream segments if less than 75% length threshold.
    clusterTolerance = float(seg_length) * 0.25
    clean_stream = cleanLineGeom(outSort, "Rank_UGO", "Rank_DGO",
                                 clusterTolerance)
    arcpy.AddField_management(clean_stream, outSegmentIDField, "LONG", "", "",
                              "", "", "NULLABLE", "NON_REQUIRED")
    arcpy.CalculateField_management(clean_stream, outSegmentIDField,
                                    '"!OBJECTID!"', "PYTHON_9.3")
    arcpy.DeleteField_management(clean_stream, "Rank_UGO")
    arcpy.DeleteField_management(clean_stream, "Rank_DGO")

    # Clean up
    del splitLine
    del outSort

    return clean_stream
Example #18
def CreateRouteEventLayer(Sites_Routes, AttTable, RouteID, BMP, EMP, Fields,
                          Output):
    IRIS_Diss = common.CreateOutPath(MainFile=Output,
                                     appendix='diss',
                                     Extension='')
    arcpy.DissolveRouteEvents_lr(
        in_events=AttTable,
        in_event_properties=' '.join([RouteID, 'LINE', BMP, EMP]),
        dissolve_field=';'.join(Fields),
        out_table=IRIS_Diss,
        out_event_properties=' '.join([RouteID, 'LINE', BMP, EMP]),
        dissolve_type="DISSOLVE",
        build_index="INDEX")

    Overlay_Event_Layer = common.CreateOutLayer('OverlayEventLayer')
    arcpy.MakeRouteEventLayer_lr(in_routes=Sites_Routes,
                                 route_id_field=RouteID,
                                 in_table=IRIS_Diss,
                                 in_event_properties=' '.join(
                                     [RouteID, 'LINE', BMP, EMP]),
                                 out_layer=Overlay_Event_Layer,
                                 offset_field="",
                                 add_error_field="ERROR_FIELD")

    Sort = common.CreateOutPath(MainFile=Output, appendix='sort', Extension='')
    arcpy.Sort_management(in_dataset=Overlay_Event_Layer,
                          out_dataset=Sort,
                          sort_field=';'.join([RouteID, BMP, EMP]))
    Final_Layer = common.CreateOutLayer('FinalLayer')

    arcpy.MakeFeatureLayer_management(in_features=Sort, out_layer=Final_Layer)
    arcpy.SelectLayerByAttribute_management(in_layer_or_view=Final_Layer,
                                            selection_type='NEW_SELECTION',
                                            where_clause="Shape_Length > 0")
    arcpy.Delete_management(Output)
    arcpy.CopyFeatures_management(in_features=Final_Layer,
                                  out_feature_class=Output)

    arcpy.Delete_management(IRIS_Diss)
    arcpy.Delete_management(Overlay_Event_Layer)
    arcpy.Delete_management(Sort)
    arcpy.Delete_management(Final_Layer)
Example #19
def extract_by_proximity(multipt, targetpt, npts, output_file):
    # set local vars
    n = npts

    # copy in_memory files to local variables for the function
    arcpy.CopyFeatures_management(multipt, "in_memory//test_cloud_copy")
    arcpy.CopyFeatures_management(targetpt, "in_memory//singlepointcopy")
    single_pt_copy = "in_memory//singlepointcopy"
    test_cloud_copy = "in_memory//test_cloud_copy"

    ##########################################################
    # use Near 3D to append distance data to the pt_cloud points

    # check out the extension
    arcpy.CheckOutExtension("3D")
    # apply the extension
    arcpy.Near3D_3d(test_cloud_copy, single_pt_copy, angle="ANGLE")
    # check in the extension
    arcpy.CheckInExtension("3D")

    ##########################################################
    # sort the resulting output shp table by distance
    arcpy.Sort_management(test_cloud_copy, "in_memory//test_cloud_copy_sorted",
                          sort_field="NEAR_DIST3 ASCENDING",
                          spatial_sort_method="UR")
    test_cloud_copy_sorted = "in_memory//test_cloud_copy_sorted"

    #####################################################################
    # extract the lowest n table entries and save as the final output file
    arcpy.Select_analysis(test_cloud_copy_sorted, output_file, '"FID"<' + str(n + 1))
    #print("Proximity analysis: Complete. Closest " + str(n) + " points extracted.")
    return output_file
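A hedged usage sketch for extract_by_proximity(); the input feature classes are placeholders, and a 3D Analyst license is assumed to be available.

# Hypothetical inputs: a point cloud and a single target point (3D Analyst required).
cloud = r"C:\data\point_cloud.shp"
target = r"C:\data\target_point.shp"
nearest10 = extract_by_proximity(cloud, target, 10, r"in_memory/closest10")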
Example #20
        u"FID", u"OBJECTID", u"Shape", u"INCSTRLEN", u"STRING2DBL", u"Field2",
        u"Field5", u"INT2DBL", u"INDEX"
    ]
    print("Keeping ", keepfield)
    # empty list
    Drop_Field = []
    # Compile list of fields to delete
    for field in fieldlist2:
        if field not in keepfield:
            Drop_Field.append(field)
    print("dropping ", Drop_Field)
    # Delete unwanted fields
    arcpy.DeleteField_management(files.copy, Drop_Field)
    print("fields deleted")
    # Sort records by geographic location
    arcpy.Sort_management(files.copy, files.final, "Shape ASCENDING", "UL")
    print("points sorted")
    # Assign new ID number to points
    Code_Block = "rec = 0\ndef autoIncrement():\n    global rec\n    pStart = 1\n    pInterval = 1\n    if rec == 0:\n        rec = pStart\n    else:\n        rec += pInterval\n    return rec"
    arcpy.CalculateField_management(files.final, "INDEX", "autoIncrement()",
                                    "PYTHON_9.3", Code_Block)
    print("new id assigned")

    # If more than 10 points in obstruction file, extract smaller cells as individual shapefiles
    # count points in sorted (final) file.
    featureclass = files.final
    variable = "INDEX"
    features = Stereoframework.Features(featureclass)
    # set coordinates system of fishnet
    #    env.outputCoordinateSystem = features.coord_sys()
    # Count all points in file
Example #21
# Set workspace and environment variables
arcpy.env.workspace = inGDB
arcpy.env.overwriteOutput = True

arcpy.AddMessage("Searching the input workspace now...")

fcList = arcpy.ListFeatureClasses("*", "point")

fcCount = len(fcList)

arcpy.AddMessage("There are {0} layers in the workspace".format(fcCount))

# Set the progressor
arcpy.SetProgressor("step", "Sorting now...", 0, fcCount, 1)

for fc in fcList:
    try:
        outPath = os.path.join(inGDB, fc + "_sorted")
        arcpy.Sort_management(fc, outPath,
                              [["ApplicationNumber", "ASCENDING"]])

        arcpy.SetProgressorPosition()

        arcpy.AddMessage("{0} has been sorted".format(fc))
    except:
        arcpy.AddMessage("{0} has a problem".format(fc))

        arcpy.SetProgressorPosition()

arcpy.AddMessage("Sort is complete.")
    #The annual average solar radiation map and the annual average precipitation map are used for this analysis

    arcpy.AddMessage("Starting solar power plant analysis....")    

    #As the first step, the two shapefiles are merged, using identity analysis
    arcpy.Identity_analysis(datapath+"CASolar.shp",datapath+"Rainfall.shp", temp+"CASolRain.shp")

    #Make a temporary layer for selection analysis
    arcpy.MakeFeatureLayer_management(temp+"CASolRain.shp", temp+"CASol_lyr")

    #Filter the combined shapefile to remove the locations of existing solar projects, culturally sensitive lands, military lands, and potential wilderness
    filter_areas(temp+"CASol_lyr", "solar")

    #The solar file is sorted on high annual average solar radiation, followed by low annual average rainfall
    arcpy.Sort_management(temp+"CASol_lyr", temp+"CASolarSorted.shp", [[ "GHIANN", "DESCENDING"], ["RANGE", "ASCENDING"]])

    arcpy.Delete_management(temp+"CASol_lyr")
    arcpy.Delete_management(temp+"CASolRain.shp")

    #Solar power plant energy generation calculation:
    #Area: Each power plant is assumed to be of 2000 acre size = 8.09X10^6 sq. m 
    #Efficiency: The conversion efficiency from insolation to electricity is assumed to be 20%
    #The annual electricity generated in GWh = [(Annual GHI in kWh/sq.m-day) * Area (sq.m) * Efficiency * 365 days]/1000
    #The constant multiplier is approx. 0.6

    #Create a shapefile for new solar locations
    arcpy.CreateFeatureclass_management(newpath, "New_Solar.shp", "POINT", "", "DISABLED", "DISABLED", temp+"CASolarSorted.shp")
    arcpy.AddField_management(newpath+"New_Solar.shp", "GHIANN", "LONG")
    soldesc = arcpy.Describe(newpath+"New_Solar.shp")
    solshapename = soldesc.shapeFieldName
Example #23
    if str(DeleteTF) == "true":
        nstep += 1
    ncurrentstep = 1

    #/segmentation of the polyline
    arcpy.AddMessage(
        "Using the SLEM script to segment the in-polyline feature - Step " +
        str(ncurrentstep) + "/" + str(nstep))
    SplitLine = dS.SLEM(inFC, SegmentationStep,
                        "%ScratchWorkspace%\\SplitLine", ScratchW, DeleteTF)

    ncurrentstep += 1
    arcpy.AddMessage("Sorting the segmented line - Step " + str(ncurrentstep) +
                     "/" + str(nstep))
    Sort = arcpy.Sort_management(
        SplitLine, OutputSeg,
        [["Rank_UGO", "ASCENDING"], ["Distance", "ASCENDING"]])

    arcpy.AddField_management(Sort, "Rank_DGO", "LONG", "", "", "", "",
                              "NULLABLE", "NON_REQUIRED")
    fieldname = [f.name for f in arcpy.ListFields(Sort)]
    arcpy.CalculateField_management(Sort, "Rank_DGO",
                                    "!" + str(fieldname[0]) + "!",
                                    "PYTHON_9.3")

    #===============================================================================
    # DELETING TEMPORARY FILES
    #===============================================================================
    if str(DeleteTF) == "true":
        ncurrentstep += 1
        arcpy.AddMessage("Deleting Temporary Files - Step " +
Example #24
def main():
    ''' main '''
    #UPDATE
    # Create a feature layer from the input point features if it is not one already
    #df = arcpy.mapping.ListDataFrames(mxd)[0]
    pointFeatureName = os.path.basename(pointFeatures)
    layerExists = False

    try:
        # Check that area to number is a polygon
        descArea = arcpy.Describe(areaToNumber)
        areaGeom = descArea.shapeType
        arcpy.AddMessage("Shape type: " + str(areaGeom))
        if (descArea.shapeType != "Polygon"):
            raise Exception("ERROR: The area to number must be a polygon.")

        gisVersion = arcpy.GetInstallInfo()["Version"]
        global appEnvironment
        appEnvironment = Utilities.GetApplication()
        if DEBUG == True: arcpy.AddMessage("App environment: " + appEnvironment)

        global mxd
        global df
        global aprx
        global mp
        global mapList
        # mxd, df, aprx, mp = None, None, None, None
        #if gisVersion == "1.0": #Pro:
        if appEnvironment == "ARCGIS_PRO":
            from arcpy import mp
            aprx = arcpy.mp.ArcGISProject("CURRENT")
            mapList = aprx.listMaps()[0]
            for lyr in mapList.listLayers():
                if lyr.name == pointFeatureName:
                    layerExists = True
        #else:
        if appEnvironment == "ARCMAP":
            from arcpy import mapping
            mxd = arcpy.mapping.MapDocument('CURRENT')
            df = arcpy.mapping.ListDataFrames(mxd)[0]
            for lyr in arcpy.mapping.ListLayers(mxd):
                if lyr.name == pointFeatureName:
                    layerExists = True

        if layerExists == False:
            arcpy.MakeFeatureLayer_management(pointFeatures, pointFeatureName)
        else:
            pointFeatureName = pointFeatures

        # Select all the points that are inside of area
        arcpy.AddMessage("Selecting points from (" + str(os.path.basename(pointFeatureName)) +\
                         ") inside of the area (" + str(os.path.basename(areaToNumber)) + ")")
        selectionLayer = arcpy.SelectLayerByLocation_management(pointFeatureName, "INTERSECT",
                                                                areaToNumber, "#", "NEW_SELECTION")
        if DEBUG == True:
            arcpy.AddMessage("Selected " + str(arcpy.GetCount_management(pointFeatureName).getOutput(0)) + " points")

        # If no output FC is specified, then set it a temporary one, as this will be copied to the input and then deleted.
        # Sort layer by upper right across and then down spatially,
        overwriteFC = False
        global outputFeatureClass
        if outputFeatureClass == "":
            outputFeatureClass = "tempSortedPoints"
            overwriteFC = True
        arcpy.AddMessage("Sorting the selected points geographically, right to left, top to bottom")
        arcpy.Sort_management(selectionLayer, outputFeatureClass, [["Shape", "ASCENDING"]])


        # Number the fields
        arcpy.AddMessage("Numbering the fields")
        i = 1
        cursor = arcpy.UpdateCursor(outputFeatureClass)
        for row in cursor:
            row.setValue(numberingField, i)
            cursor.updateRow(row)
            i += 1


        # Clear the selection
        arcpy.AddMessage("Clearing the selection")
        arcpy.SelectLayerByAttribute_management(pointFeatureName, "CLEAR_SELECTION")


        # Overwrite the Input Point Features, and then delete the temporary output feature class
        targetLayerName = ""
        if (overwriteFC):
            arcpy.AddMessage("Copying the features to the input, and then deleting the temporary feature class")
            desc = arcpy.Describe(pointFeatureName)
            overwriteFC = os.path.join(os.path.sep, desc.path, pointFeatureName)
            fields = (numberingField, "SHAPE@")
            overwriteCursor = arcpy.da.UpdateCursor(overwriteFC, fields)
            for overwriteRow in overwriteCursor:
                sortedPointsCursor = arcpy.da.SearchCursor(outputFeatureClass, fields)
                for sortedRow in sortedPointsCursor:
                    if sortedRow[1].equals(overwriteRow[1]):
                        overwriteRow[0] = sortedRow[0]
                overwriteCursor.updateRow(overwriteRow)
            arcpy.Delete_management(outputFeatureClass)

            #UPDATE
            #if layerExists == False:
                #layerToAdd = arcpy.mapping.Layer(pointFeatureName)
                #arcpy.mapping.AddLayer(df, layerToAdd, "AUTO_ARRANGE")
            targetLayerName = pointFeatureName
        else:
            #UPDATE
            #layerToAdd = arcpy.mapping.Layer(outputFeatureClass)
            #arcpy.mapping.AddLayer(df, layerToAdd, "AUTO_ARRANGE")
            targetLayerName = os.path.basename(outputFeatureClass)


        # Get and label the output feature
        if appEnvironment == "ARCGIS_PRO":
            results = arcpy.MakeFeatureLayer_management(outputFeatureClass, targetLayerName).getOutput(0)
            mapList.addLayer(results, "AUTO_ARRANGE")
            layer = findLayerByName(targetLayerName)
            if(layer):
                labelFeatures(layer, numberingField)
        elif appEnvironment == "ARCMAP":
            arcpy.AddMessage("Adding features to map (" + str(targetLayerName) + ")...")
            arcpy.MakeFeatureLayer_management(outputFeatureClass, targetLayerName)
            layer = arcpy.mapping.Layer(targetLayerName)
            arcpy.mapping.AddLayer(df, layer, "AUTO_ARRANGE")
            arcpy.AddMessage("Labeling output features (" + str(targetLayerName) + ")...")
            layer = findLayerByName(targetLayerName)
            if (layer):
                labelFeatures(layer, numberingField)
        else:
            arcpy.AddMessage("Non-map application, skipping labeling...")


        arcpy.SetParameter(3, outputFeatureClass)


    except arcpy.ExecuteError:
        # Get the tool error messages
        msgs = arcpy.GetMessages()
        arcpy.AddError(msgs)
        print(msgs)

    except:
        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]

        # Concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"

        # Return python error messages for use in script tool or Python Window
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)

        # Print Python error messages for use in Python / Python Window
        print(pymsg + "\n")
        print(msgs)
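main() above calls findLayerByName() and labelFeatures(), which are defined elsewhere in the tool; a hedged sketch of what an ArcGIS Pro version of findLayerByName() might look like follows (an assumption, not the original helper).

# Hypothetical helper: return the first layer in the project whose name matches.
def findLayerByName(layerName):
    aprx = arcpy.mp.ArcGISProject("CURRENT")
    for m in aprx.listMaps():
        for lyr in m.listLayers():
            if lyr.name == layerName:
                return lyr
    return None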
def createSegments(contour_at_mean_high_water, contour_at_surge):
    # Start a timer  
    time1 = time.clock()
    arcpy.AddMessage("\nSegmentation of the coastline started at "+str(datetime.now()))

    # Specify a tolerance distance or minimum length of a seawall
    # Users are not yet given control of this
    th = 150

    # Create random points along the lines (mean high water and the surge of choice)
    # The numbers used are just my choice based on iterative observations
    random0 = arcpy.CreateRandomPoints_management(out_path= arcpy.env.workspace, \
                                                out_name= "random0", \
                                                constraining_feature_class= contour_at_mean_high_water, \
                                                number_of_points_or_field= long(1600), \
                                                  minimum_allowed_distance = "{0} Feet".format(th))

    random1 = arcpy.CreateRandomPoints_management(out_path= arcpy.env.workspace, \
                                                    out_name= "random1", \
                                                    constraining_feature_class= contour_at_surge, \
                                                    number_of_points_or_field= long(1600), \
                                                  minimum_allowed_distance = "{0} Feet".format(th))

    # Perform a proximity analysis with the NEAR tool 
    arcpy.Near_analysis(random0, random1)
    # Give each point a fixed unique ID
    # Create the ID field
    arcpy.AddField_management (random0, "UniqueID", "SHORT")
    arcpy.AddField_management (random1, "UniqueID", "SHORT")
    # Add Unique IDs 
    arcpy.CalculateField_management(random0, "UniqueID", "[FID]")
    arcpy.CalculateField_management(random1, "UniqueID", "[FID]")

    # Categorize/Separate each feature based on their near feature
    # Create a table view of random0
    table0 = arcpy.MakeTableView_management(random0, "random0_table")
    #table1 = arcpy.MakeTableView_management(random1, "random1_table")
    # Sort the near feature for each points in random0 
    random0_sorted = arcpy.Sort_management(table0, "random0_sorte.dbf", [["NEAR_FID", "ASCENDING"]])


    # Create "long enough" lists for each of the field of interests: ID, NEAR_ID, and NEAR_DIST
    # (distance to closest point). I added [99999] here to extend the list length and avoid IndexError
    list_fid = [r.getValue("UniqueID") for r in arcpy.SearchCursor(random0_sorted, ["UniqueID"])] +[99999]
    list_nearid = [r.getValue("NEAR_FID") for r in arcpy.SearchCursor(random0_sorted, ["NEAR_FID"])]\
                  +[99999]
    list_neardist = [r.getValue("NEAR_DIST") for r in arcpy.SearchCursor(random0_sorted, ["NEAR_DIST"])]\
                    +[99999]

    del r

    # Only take points with near feature within the specified threshold. If it's too far, it's not better
    # than the others for a segment point
    list_fid_filtered = [i for i in list_neardist if i < th]
    # Then initiate a list to contain their Unique ID and Near ID
    first_unique_id = [] 
    first_near_id = []
    # Get NEAR_ID and Unique ID for each of these points
    for i in list_fid_filtered:
        first_unique_id.append(list_fid[list_neardist.index(i)])
        first_near_id.append(list_nearid[list_neardist.index(i)])

    # Only take the unique values in case there are duplicates. This shouldn't happen. Just to make sure.
    first_unique_id = [i for i in set(first_unique_id)]
    first_near_id = [i for i in set(first_near_id)]


    # Now create a new feature out of these points
    # First let's create a Feature Layer
    arcpy.MakeFeatureLayer_management("random0.shp", "random0_lyr")
    # Let's select all points and export them into a new feature
    random0_points = arcpy.SearchCursor(random0, ["UniqueID"])
    point0 = random0_points.next()

    for point0 in random0_points:
        for i in range(len(first_unique_id)):
            if point0.getValue("UniqueID") == first_unique_id[i]:
                selector0 = arcpy.SelectLayerByAttribute_management(\
                     "random0_lyr", "ADD_TO_SELECTION", '"UniqueID" = {0}'.format(first_unique_id[i]))

    del point0, random0_points
     
    new_random0 = arcpy.CopyFeatures_management(selector0, "new_random0")
    arcpy.Delete_management('random0_lyr')
    

    # Now for the new point feature, remove clusters of points around them and take only the ones
    # with minimum NEAR_DIST
    # First, get the geometry attributes of the new points
    arcpy.AddGeometryAttributes_management(new_random0, "POINT_X_Y_Z_M", "", "", "")

    # Create long enough list of the field of interest (same as the previous) 
    pointx = [r.getValue("POINT_X") for r in arcpy.SearchCursor(new_random0, ["POINT_X"])] +[99999]
    pointy = [r.getValue("POINT_Y") for r in arcpy.SearchCursor(new_random0, ["POINT_Y"])] +[99999]
    new_list_fid = [r.getValue("UniqueID") for r in arcpy.SearchCursor(new_random0, ["UniqueID"])]\
                   +[99999]
    new_list_nearid = [r.getValue("NEAR_FID") for r in arcpy.SearchCursor(new_random0, ["NEAR_FID"])]\
                      +[99999]
    new_list_neardist = [r.getValue("NEAR_DIST") for r in arcpy.SearchCursor(new_random0, ["NEAR_DIST"])]\
                        +[99999]

    del r


    # Initiate a list of every points that has already been compared to the near points
    garbage = []
    # Also initiate a list for the new Unique ID and NEAR ID
    new_unique_ID = []
    new_near_ID = []
    # Then, check if the points are right next to them. If so, add them to a temporary list
    # and find the one with closest near ID (or find minimum of their NEAR_DIST)
    for i in range(len(pointx)):
        if i+1 < len(pointx):
             
            # If not within the th range 
            if not calculateDistance(pointx[i], pointy[i], pointx[i+1], pointy[i+1]) < float(th)*1.5:
                # Skip if it's in garbage 
                if new_list_nearid[i] in garbage:
                    continue
                else:
                    new_unique_ID.append(new_list_fid[i])
                    new_near_ID.append(new_list_nearid[i])

            # If within the range        
            else:
                # Skip if it's in garbage 
                if new_list_nearid[i] in garbage:
                    continue
                else:
                    temp_ID = []
                    temp_NEAR = []
                    temp_DIST = []
                    while True:
                        temp_ID.append(new_list_fid[i])
                        temp_NEAR.append(new_list_nearid[i])
                        temp_DIST.append(new_list_neardist[i])
                        garbage.append(new_list_nearid[i])
                        i = i+1
                        # Stop when within the range again. And add the last point within the range
                        if not calculateDistance(pointx[i], pointy[i], pointx[i+1], pointy[i+1]) < 200:
                            temp_ID.append(new_list_fid[i])
                            temp_NEAR.append(new_list_nearid[i])
                            temp_DIST.append(new_list_neardist[i])
                            garbage.append(new_list_nearid[i])

                            # Calculate the minimum and get the Unique ID and Near ID  
                            minD = min(temp_DIST)
                            new_unique_ID.append(new_list_fid[new_list_neardist.index(minD)])
                            new_near_ID.append(new_list_nearid[new_list_neardist.index(minD)])

                            del temp_ID, temp_NEAR, temp_DIST
                            break


    # Now select these final points export them into new feature.
    # These are the end points for the segments to be created
    # First, make a layer out of all the random points
    arcpy.MakeFeatureLayer_management("random0.shp", "random0_lyr") 
    arcpy.MakeFeatureLayer_management("random1.shp", "random1_lyr") 

    # Then select and export the end points into feature0 and feature1
    # Based on new_unique_ID for random0
    random0_points = arcpy.SearchCursor(random0, ["UniqueID"])
    point0 = random0_points.next()
    for point0 in random0_points:
        for i in range(len(new_unique_ID)):
            if point0.getValue("UniqueID") == new_unique_ID[i]:
                selected0 = arcpy.SelectLayerByAttribute_management(\
                     "random0_lyr", "ADD_TO_SELECTION", '"UniqueID" = {0}'.format(new_unique_ID[i]))

    feature0 = arcpy.CopyFeatures_management(selected0, "feature0")

    # Based on new_near_ID for random1
    random1_points = arcpy.SearchCursor(random1, ["UniqueID"])
    point1 = random1_points.next()
    for point1 in random1_points:
        for k in range(len(new_near_ID)):
            if point1.getValue("UniqueID") == new_near_ID[k]:
                selected1 = arcpy.SelectLayerByAttribute_management(\
                     "random1_lyr", "ADD_TO_SELECTION", '"UniqueID" = {0}'.format(new_near_ID[k]))

    feature1 = arcpy.CopyFeatures_management(selected1, "feature1")

    del point0, point1, random0_points, random1_points 
    arcpy.Delete_management('random0_lyr')
    arcpy.Delete_management('random1_lyr')


    # Now for the actual creation of the coastal segments,
    # which includes creating the polygons and splitting the contours at the corresponding points
    # STEPS NECESSARY FOR POLYGON CREATION
    # Let's first add geometry attributes to these points
    arcpy.AddGeometryAttributes_management(feature0, "POINT_X_Y_Z_M", "", "", "")
    arcpy.AddGeometryAttributes_management(feature1, "POINT_X_Y_Z_M", "", "", "")

    # Let's create lines that connects points from feature0 to feature1 
    # Initiate a POLYLINE feature class for these lines
    arcpy.CreateFeatureclass_management (arcpy.env.workspace, "connector_lines.shp", "POLYLINE")

    # Then for each of the points in feature0, get the corresponding point in feature1
    # And create a line for each of the two points
    with arcpy.da.SearchCursor(feature0, ["NEAR_FID", "POINT_X", "POINT_Y"]) as features0:
        for feat0 in features0:
                    
            with arcpy.da.SearchCursor(feature1, ["UniqueID", "POINT_X", "POINT_Y"]) as features1:
                x=0
                for feat1 in features1:
                    x = x+1
                    theseTwoPoints = []

                    if feat0[0] == feat1[0]:
                        # Get coordinates 
                        X0, Y0 = feat0[1], feat0[2]
                        X1, Y1 = feat1[1], feat1[2]
                        # Append coordinates
                        theseTwoPoints.append(arcpy.PointGeometry(arcpy.Point(X0, Y0)))
                        theseTwoPoints.append(arcpy.PointGeometry(arcpy.Point(X1, Y1)))
                        # Create line from the coordinates
                        subline = arcpy.PointsToLine_management(theseTwoPoints, "subline"+str(x)+".shp")
                        # Append all lines into one feature
                        lines = arcpy.Append_management(["subline"+str(x)+".shp"], "connector_lines.shp")
                        # Then delete subline as it's now unnecessary
                        arcpy.Delete_management(subline)

                        continue

    
    del feat0, feat1, features0, features1

    # Now that the connectors are created, let's split the segments 
    # Before splitting contours into segments, let's integrate the points and the segments
    # Just in case there are misalignments
    arcpy.Integrate_management([contour_at_mean_high_water, feature0])
    arcpy.Integrate_management([contour_at_surge, feature1])
    segments0 = arcpy.SplitLineAtPoint_management(contour_at_mean_high_water, feature0, "segments0.shp", "10 Feet")
    segments1 = arcpy.SplitLineAtPoint_management(contour_at_surge, feature1, "segments1.shp", "10 Feet")
    # And let's give fixed unique ID for each segment
    arcpy.CalculateField_management(segments0, "Id", "[FID]")
    arcpy.CalculateField_management(segments1, "Id", "[FID]")

    # Now with the split segments and connector lines, let's make segment polygon of the segments
    almost_segment_polygons = arcpy.FeatureToPolygon_management([segments0, segments1, lines],\
                                                                "almost_segment_polygons.shp")
    # Adding unique ID to the segment polygons
    arcpy.CalculateField_management(almost_segment_polygons, "Id", "[FID]")
    
    # The Feature To Polygon process also creates polygons that are completely surrounded by other polygons.
    # These areas are enclosed by flooded areas at surge but sit above the surge themselves,
    # so they are technically safe. Let's remove them.
    arcpy.MakeFeatureLayer_management(almost_segment_polygons, 'almost_segment_polygons_lyr')
    arcpy.MakeFeatureLayer_management(segments0, 'segments0_lyr')
    # Only the polygons within the mean_high_water segments are at risk
    arcpy.SelectLayerByLocation_management('almost_segment_polygons_lyr', 'INTERSECT', 'segments0_lyr')
    final_without_length = arcpy.CopyFeatures_management('almost_segment_polygons_lyr', 'final.shp')
    
    arcpy.Delete_management('segments0_lyr')
    arcpy.Delete_management('almost_segment_polygons_lyr')

    # For the new polygons, let's add the corresponding seawall length
    # Let's add Length field to both first
    arcpy.AddField_management(final_without_length, "Length", "SHORT")
    arcpy.AddField_management(segments0, "Length", "SHORT")
    # Calculation of the length
    with arcpy.da.UpdateCursor(segments0, ["SHAPE@LENGTH", "Length"]) as segments_0:  
         for segment_0 in segments_0:
              length = segment_0[0]
              segment_0[1] = length
              segments_0.updateRow(segment_0)
    del segment_0, segments_0

    # With spatial join, let's add these results to the segment polygons 
    final = spatialJoin(final_without_length, segments0, "Length", "Length", "max", "joined_segment.shp")

    # Delete the created but now unnecessary files 
    arcpy.Delete_management(random0)
    arcpy.Delete_management(random1)

    # Stop the timer 
    time2 = time.clock()

    arcpy.AddMessage("Seawall segments and regions successfully created. It took "\
                     +str(time2-time1)+" seconds")
    
    return final
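createSegments() calls calculateDistance() and spatialJoin() helpers that are not shown; the distance helper is presumably a planar Euclidean distance, along the lines of this sketch.

# Hypothetical helper: planar distance between two coordinate pairs.
import math

def calculateDistance(x0, y0, x1, y1):
    return math.hypot(x1 - x0, y1 - y0)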
        predictor = "pre1;pre2;pre3;pre4;pre5"
        arcpy.GeographicallyWeightedRegression_stats(
            spatialJoin_result, "DV", predictor, GWR_result, "ADAPTIVE",
            "BANDWIDTH PARAMETER", "#", "50", "#", "#", "#", "#", "#", "#")

        # reclassify GWR predicted value and get the FUI classification
        # Replicate the GWR result
        GWR_reclassify = output
        arcpy.Copy_management(GWR_result, GWR_reclassify)
        arcpy.AddField_management(GWR_reclassify, "ESI", "DOUBLE", 20, 5)

        # get the min and max predicted values
        GWR_min = GWR_reclassify[:-4] + "_min.shp"
        GWR_max = GWR_reclassify[:-4] + "_max.shp"

        arcpy.Sort_management(GWR_reclassify, GWR_min,
                              [["Predicted", "ASCENDING"]])
        arcpy.Sort_management(GWR_reclassify, GWR_max,
                              [["Predicted", "DESCENDING"]])

        enumeration_min = arcpy.SearchCursor(GWR_min)
        minValue = enumeration_min.next().getValue("Predicted")
        arcpy.AddMessage("Minimum predicted value:  " + str(minValue) + "\n")

        enumeration_max = arcpy.SearchCursor(GWR_max)
        maxValue = enumeration_max.next().getValue("Predicted")
        arcpy.AddMessage("Maximum predicted value:  " + str(maxValue) + "\n")

        increase = (maxValue - minValue) / 4
        first = increase + minValue
        second = increase * 2 + minValue
        third = increase * 3 + minValue
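The snippet above computes the quartile break values (first, second, third) of the GWR "Predicted" field; presumably they are then used to fill the ESI class field, roughly as in this sketch (an assumption about the truncated continuation, not the original code).

# Hypothetical classification of Predicted into four classes using the breaks above.
with arcpy.da.UpdateCursor(GWR_reclassify, ["Predicted", "ESI"]) as cursor:
    for row in cursor:
        if row[0] < first:
            row[1] = 1
        elif row[0] < second:
            row[1] = 2
        elif row[0] < third:
            row[1] = 3
        else:
            row[1] = 4
        cursor.updateRow(row)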
Example #27
    "OID@", "CATCHMENT_ID", "Ground_Sum", "Understory_Sum", "Overstory_Sum"
]
##search_fields = ["OID@","CATCHMENT_ID","ZonalSt_"+naming[:4]+"1","ZonalSt_"+naming[:4]+"2","ZonalSt_"+naming[:4]+"3"]

update_fields = [
    "OID@", "SHAPE@AREA", "GROUND_COUNT", "UNDER_COUNT", "OVER_COUNT",
    "Percent_Ground", "Percent_Understory", "Percent_Overstory"
]

modelist_ground = []
modelist_under = []
modelist_over = []

#sort random points by cid so that cursor works
sorttable = os.path.join(env.workspace, naming + "_sorttable_")
arcpy.Sort_management(randos, sorttable, [["CATCHMENT_ID", "ASCENDING"]])

CID_counter = 0
previous_CID = None

arcpy.AddMessage("Calculating Veg Stats...")
# iterate through points sorted by catchment id
with arcpy.da.SearchCursor(sorttable, (search_fields)) as search:

    for row in search:

        ##            arcpy.AddMessage("CID is "+str(row[1]))

        if row[1] != previous_CID or CID_counter > 0:
            #append points to list for mode calculation
            modelist_ground.append(row[2])
Example #28
        ddp.currentPageID = ddp.getPageIDFromName(PIN)

        #Graphic table variable values
        tableHeight = 3
        tableWidth = 2.4
        headerHeight = 0.2
        rowHeight = 0.15
        upperX = 8.1
        upperY = 7.2

        #Build selection set
        numRecords = int(arcpy.GetCount_management(FreqTable).getOutput(0))
        print "\t\tRecords in ag use by soil type table: " + str(numRecords)

        #Sort selection
        arcpy.Sort_management(FreqTable, r'in_memory\sort1',
                              [["TYPE", "ASCENDING"]])

        #Add note if there are no records > 100%
        if numRecords == 0:
            noGrowth.elementPositionX = 3
            noGrowth.elementPositionY = 2
        else:
            #if number of rows exceeds page space, resize row height
            if ((tableHeight - headerHeight) / numRecords) < rowHeight:
                headerHeight = headerHeight * (
                    (tableHeight - headerHeight) / numRecords) / rowHeight
                rowHeight = (tableHeight - headerHeight) / numRecords
            else:
                pass

        #Set and clone vertical line work
Example #29
    startPos = "LL"
elif (labelStartPos == "Lower-Right"):
    startPos = "LR"

# Import the custom toolbox with the fishnet tool in it, and run it. The tool had to be wrapped in a model
# because of a bug that prevented passing variables directly to the Create Fishnet tool.
#UPDATE
toolboxPath = os.path.dirname(sysPath) + "\\Clearing Operations Tools.tbx"
arcpy.ImportToolbox(toolboxPath)
arcpy.AddMessage("Creating Fishnet Grid")
arcpy.Fishnet_ClearingOperations(tempOutput, originCoordinate, yAxisCoordinate, str(cellWidth), str(cellHeight), 0, 0, oppCornerCoordinate, "NO_LABELS", templateExtent, "POLYGON")

# Sort the grid upper left to lower right, and delete the in memory one
arcpy.AddMessage("Sorting the grid for labeling")
tempSort = "tempSort"
arcpy.Sort_management(tempOutput, tempSort, [["Shape", "ASCENDING"]], startPos)
arcpy.Delete_management("in_memory")

# Add a field which will be used to add the grid labels
arcpy.AddMessage("Adding field for labeling the grid")
gridField = "Grid"
arcpy.AddField_management(tempSort, gridField, "TEXT")

# Number the fields
arcpy.AddMessage("Numbering the grids")
letterIndex = 1
secondLetterIndex = 1
letter = 'A'
secondLetter = 'A'
number = 1
lastY = -9999
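
# --- hedged sketch, not part of the original example (the labeling loop is cut off) ---
# One plausible way to number the sorted grid: walk the cells and build labels such
# as "A1", "A2", ..., "B1". The Y-change tolerance and the single-letter scheme are
# assumptions; the original also tracks letterIndex/secondLetter, presumably for
# labels beyond "Z", which this sketch ignores.
with arcpy.da.UpdateCursor(tempSort, ["SHAPE@", gridField]) as cursor:
    for shape, _ in cursor:
        y = shape.centroid.Y
        if lastY != -9999 and abs(y - lastY) > 0.0001:
            letter = chr(ord(letter) + 1)  # new row of cells: advance the letter
            number = 1                     # and restart the numbering
        cursor.updateRow([shape, letter + str(number)])
        number += 1
        lastY = y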
Example #30
0
def generate_route_border_rule_table(workspace,route,route_id_field,boundary,boundary_id_field,buffer_size,route_border_rule_table,high_angle_threshold,offset):
    arcpy.AddMessage("Generating route border rule source table for {1}...".format(boundary))
    try:
        date = datetime.now()
        date_string = date.strftime("%m/%d/%Y")

        spatial_reference = arcpy.Describe(route).spatialReference
        xy_resolution = "{0} {1}".format(spatial_reference.XYResolution,spatial_reference.linearUnitName)

        ###############################################################################################################
        # get all candidate border routes
        arcpy.AddMessage("Identifying candidate border routes...")

        # generate boundary border
        boundary_border = os.path.join(workspace,"{0}_{1}_border".format(boundary,"boundary"))
        arcpy.FeatureToLine_management(boundary, boundary_border)

        # dissolve polygon boundary based on boundary id
        boundary_border_dissolved = os.path.join(workspace,"{0}_boundary_border_dissolved".format(boundary))
        arcpy.Dissolve_management(boundary_border,boundary_border_dissolved,[boundary_id_field])

        # generate buffer around boundary
        # arcpy.AddMessage("generate buffer around boundary")
        boundary_border_buffer = os.path.join(workspace,"{0}_{1}".format(boundary,"boundary_buffer"))
        arcpy.Buffer_analysis(boundary_border_dissolved, boundary_border_buffer, buffer_size, "FULL", "ROUND")

        # get candidate border route
        # arcpy.AddMessage("get candidate border route")
        candidate_border_route_multipart = "in_memory\\candidate_{0}_border_route_multipart".format(boundary)
        candidate_border_route = os.path.join(workspace,"candidate_{0}_border_route".format(boundary))
        arcpy.Clip_analysis(route, boundary_border_buffer, candidate_border_route_multipart)
        arcpy.MultipartToSinglepart_management(candidate_border_route_multipart, candidate_border_route)
        ################################################################################################################


        ################################################################################################################
        #  filter out candidate border routes that 'intersect' the boundary at high angles
        arcpy.AddMessage("Filtering out candidate border routes that 'intersect' the boundary at high angles...")

        route_buffer = os.path.join(workspace,"{0}_{1}".format(route,"buffer_flat"))
        if not arcpy.Exists(route_buffer):
            arcpy.Buffer_analysis(route, route_buffer, buffer_size, "FULL", "FLAT")

        # clip boundary segments within route buffer
        boundary_border_within_buffer_multipart = "in_memory\\{0}_boundary_within_{1}_buffer_multipart".format(boundary,route)
        boundary_border_within_buffer = os.path.join(workspace,"{0}_boundary_within_{1}_buffer".format(boundary,route))
        arcpy.Clip_analysis(boundary_border_dissolved, route_buffer, boundary_border_within_buffer_multipart)
        arcpy.MultipartToSinglepart_management(boundary_border_within_buffer_multipart, boundary_border_within_buffer)

        # Add 'SEGMENT_ID_ALL_CANDIDATES' field to candidate route and populate it with 'OBJECTID'
        arcpy.AddField_management(candidate_border_route,"SEGMENT_ID_ALL_CANDIDATES","LONG")
        arcpy.CalculateField_management(candidate_border_route, "SEGMENT_ID_ALL_CANDIDATES", "!OBJECTID!", "PYTHON")

        # Add 'ANGLE_ROUTE' field to the candidate routes and populate it with the angle relative to true north (0 degrees)
        arcpy.AddField_management(candidate_border_route,"ANGLE_ROUTE","DOUBLE")
        with arcpy.da.UpdateCursor(candidate_border_route,("SHAPE@","ANGLE_ROUTE")) as uCur:
            for row in uCur:
                shape = row[0]
                x_first = shape.firstPoint.X
                y_first = shape.firstPoint.Y
                x_last = shape.lastPoint.X
                y_last = shape.lastPoint.Y

                angle = calculate_angle(x_first,y_first,x_last,y_last)

                if angle >=0:
                    row[1]=angle
                    uCur.updateRow(row)

        # Add 'ANGLE_BOUNDARY' field to the boundary segments within the route buffer and populate it with the angle relative to true north (0 degrees)
        arcpy.AddField_management(boundary_border_within_buffer,"ANGLE_BOUNDARY","DOUBLE")
        with arcpy.da.UpdateCursor(boundary_border_within_buffer,("SHAPE@","ANGLE_BOUNDARY")) as uCur:
            for row in uCur:
                shape = row[0]
                x_first = shape.firstPoint.X
                y_first = shape.firstPoint.Y
                x_last = shape.lastPoint.X
                y_last = shape.lastPoint.Y

                angle = calculate_angle(x_first,y_first,x_last,y_last)

                if angle >= 0:  # treat 0 (due north) as a valid angle, as in the route block above
                    row[1]=angle
                    uCur.updateRow(row)

        del uCur

        # locate boundary segments within the buffer along the candidate border routes,
        # assuming that a boundary segment that cannot be located along its corresponding route intersects it at a high angle
        boundary_along_candidate_border_route = os.path.join(workspace,"{0}_boundary_along_candidate_{1}_border_route".format(boundary,boundary))
        arcpy.LocateFeaturesAlongRoutes_lr(boundary_border_within_buffer,candidate_border_route,"SEGMENT_ID_ALL_CANDIDATES",buffer_size,\
                                           boundary_along_candidate_border_route,"{0} {1} {2} {3}".format("RID","LINE","FMEAS","TMEAS"))

        arcpy.JoinField_management(boundary_along_candidate_border_route, "RID", candidate_border_route, "SEGMENT_ID_ALL_CANDIDATES", ["ANGLE_ROUTE"])


        positive_candidate_border_route = []
        with arcpy.da.SearchCursor(boundary_along_candidate_border_route,("RID","ANGLE_ROUTE","ANGLE_BOUNDARY")) as sCur:
            for row in sCur:
                sid = str(row[0])
                angle_route = row[1]
                angle_boundary = row[2]

                if angle_route is not None and angle_boundary is not None:  # 0 is a valid bearing
                    delta_angle = abs(angle_route-angle_boundary)

                    # get real intersecting angle
                    if delta_angle > 90 and delta_angle <= 270:
                        delta_angle = abs(180 - delta_angle)
                    elif delta_angle > 270:
                        delta_angle = 360 - delta_angle
                    else:
                        pass

                    # filter out negative candidate border route
                    if delta_angle <= high_angle_threshold:
                        if sid not in positive_candidate_border_route:
                            positive_candidate_border_route.append(sid)
        del sCur
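        # Worked example of the angle normalisation above (illustrative numbers, not
        # from the original): angle_route = 350 and angle_boundary = 10 give
        # delta_angle = 340 > 270, so the true intersecting angle is 360 - 340 = 20
        # degrees and the segment is kept whenever high_angle_threshold >= 20.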

        candidate_border_route_lyr = "in_memory\\candidate_border_route_lyr"
        arcpy.MakeFeatureLayer_management(candidate_border_route, candidate_border_route_lyr)
        candidate_border_route_positive = os.path.join(workspace,"candidate_{0}_border_route_positive".format(boundary))
        where_clause = "\"{0}\" IN ({1})".format("OBJECTID",",".join(positive_candidate_border_route))
        arcpy.SelectLayerByAttribute_management(candidate_border_route_lyr, "NEW_SELECTION", where_clause)
        arcpy.CopyFeatures_management(candidate_border_route_lyr,candidate_border_route_positive)

        candidate_border_route_negative = os.path.join(workspace,"candidate_{0}_border_route_negative".format(boundary))
        where_clause = "\"{0}\" NOT IN ({1})".format("OBJECTID",",".join(positive_candidate_border_route))
        arcpy.SelectLayerByAttribute_management(candidate_border_route_lyr, "NEW_SELECTION", where_clause)
        arcpy.CopyFeatures_management(candidate_border_route_lyr,candidate_border_route_negative)
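        # Note (not in the original): the IN (...) / NOT IN (...) clauses above assume
        # positive_candidate_border_route is non-empty; an empty list would produce
        # invalid SQL such as "OBJECTID" IN ().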
        ################################################################################################################


        ################################################################################################################
        # get left, right boundary topology of positive candidate border route
        # handle candidate border route segment with different L/R boundary id by offset
        arcpy.AddMessage("Calculating L/R boundary topology of positive candidate border route...")

        # generate offset around boundary
        boundary_border_offset= os.path.join(workspace,"{0}_{1}".format(boundary,"boundary_offset"))
        arcpy.Buffer_analysis(boundary_border_dissolved, boundary_border_offset, offset, "FULL", "ROUND")

        # get intersections between positive candidate border route and boundary offset
        candidate_border_route_positive_boundary_offset_intersections = os.path.join(workspace,"candidate_{0}_border_route_positive_{1}_offset_intersections".format(boundary,boundary))
        arcpy.Intersect_analysis([candidate_border_route_positive,boundary_border_offset], candidate_border_route_positive_boundary_offset_intersections, "ALL", "", "point")

        # split positive candidate border route by intersections generated above
        candidate_border_route_positive_splitted_by_offset = os.path.join(workspace,"candidate_{0}_border_route_positive_splitted_by_offset".format(boundary))
        arcpy.SplitLineAtPoint_management(candidate_border_route_positive,candidate_border_route_positive_boundary_offset_intersections,\
                                          candidate_border_route_positive_splitted_by_offset,xy_resolution)

        # Add 'SEGMENT_ID_POSITIVE_CANDIDATES' field to the split positive candidate route and populate it with 'OBJECTID'
        arcpy.AddField_management(candidate_border_route_positive_splitted_by_offset,"SEGMENT_ID_POSITIVE_CANDIDATES","LONG")
        arcpy.CalculateField_management(candidate_border_route_positive_splitted_by_offset, "SEGMENT_ID_POSITIVE_CANDIDATES", "!OBJECTID!", "PYTHON")

        # get positive candidate border route segments that are within the boundary offset
        candidate_border_route_positive_within_offset = os.path.join(workspace,"candidate_{0}_border_route_positive_within_offset".format(boundary))
        candidate_border_route_positive_splitted_by_offset_lyr = "in_memory\\candidate_{0}_border_route_positive_splitted_by_offset_lyr".format(boundary)
        arcpy.MakeFeatureLayer_management(candidate_border_route_positive_splitted_by_offset, candidate_border_route_positive_splitted_by_offset_lyr)
        arcpy.SelectLayerByLocation_management (candidate_border_route_positive_splitted_by_offset_lyr, "WITHIN", boundary_border_offset)
        arcpy.CopyFeatures_management(candidate_border_route_positive_splitted_by_offset_lyr,candidate_border_route_positive_within_offset)

        # get positive candidate border route segments that fall outside the boundary offset
        candidate_border_route_positive_outof_offset = os.path.join(workspace,"candidate_{0}_border_route_positive_outof_offset".format(boundary))
        arcpy.SelectLayerByAttribute_management(candidate_border_route_positive_splitted_by_offset_lyr, "SWITCH_SELECTION")
        arcpy.CopyFeatures_management(candidate_border_route_positive_splitted_by_offset_lyr,candidate_border_route_positive_outof_offset)

        # generate offset around positive candidate border route within boundary offset
        # arcpy.AddMessage("generate offset around boundary")
        candidate_border_route_positive_within_offset_buffer= os.path.join(workspace,"candidate_{0}_border_route_positive_within_offset_buffer".format(boundary))
        arcpy.Buffer_analysis(candidate_border_route_positive_within_offset, candidate_border_route_positive_within_offset_buffer, offset, "FULL", "FLAT")

        # clip boundary segments within offset distance of the positive candidate routes that are within the boundary offset
        boundary_border_within_positive_candidate_border_route_buffer_multipart = "in_memory\\{0}_boundary_within_positive_candidate_border_route_buffer_multipart".format(boundary)
        boundary_border_within_positive_candidate_border_route_buffer = os.path.join(workspace,"{0}_boundary_within_positive_candidate_border_route_buffer".format(boundary))
        arcpy.Clip_analysis(boundary_border_dissolved, candidate_border_route_positive_within_offset_buffer, boundary_border_within_positive_candidate_border_route_buffer_multipart)
        arcpy.MultipartToSinglepart_management(boundary_border_within_positive_candidate_border_route_buffer_multipart, boundary_border_within_positive_candidate_border_route_buffer)

        # get endpoints of the boundary border within the offset buffer of the split positive candidate border routes
        boundary_border_within_positive_candidate_border_route_buffer_endpoints = os.path.join(workspace,"{0}_boundary_within_positive_candidate_border_route_buffer_endpoints".format(boundary))
        arcpy.FeatureVerticesToPoints_management(boundary_border_within_positive_candidate_border_route_buffer,\
                                                 boundary_border_within_positive_candidate_border_route_buffer_endpoints,"BOTH_ENDS")
        arcpy.DeleteIdentical_management(boundary_border_within_positive_candidate_border_route_buffer_endpoints, ["Shape"])

        # split the boundary border within the offset buffer of the split positive candidate border routes at those endpoint locations,
        # then delete identical shapes
        boundary_border_within_positive_candidate_border_route_buffer_splitted_by_own_endpoints = os.path.join(workspace,"{0}_boundary_within_positive_candidate_border_route_buffer_splitted_by_own_endpoints".format(boundary))
        arcpy.SplitLineAtPoint_management(boundary_border_within_positive_candidate_border_route_buffer,boundary_border_within_positive_candidate_border_route_buffer_endpoints,\
                                          boundary_border_within_positive_candidate_border_route_buffer_splitted_by_own_endpoints,xy_resolution)
        arcpy.DeleteIdentical_management(boundary_border_within_positive_candidate_border_route_buffer_splitted_by_own_endpoints, ["Shape"])

        # Add 'SEGMENT_ID_BOUNDARY' field to the boundary segments within offset distance of the positive candidate routes that are within the boundary offset, and populate it with 'OBJECTID'
        arcpy.AddField_management(boundary_border_within_positive_candidate_border_route_buffer_splitted_by_own_endpoints,"SEGMENT_ID_BOUNDARY","LONG")
        arcpy.CalculateField_management(boundary_border_within_positive_candidate_border_route_buffer_splitted_by_own_endpoints, "SEGMENT_ID_BOUNDARY", "!OBJECTID!", "PYTHON")

        # locate those boundary segments along the positive candidate routes that are within the boundary offset
        boundary_border_within_positive_candidate_border_route_buffer_along_candidate_border_route = os.path.join(workspace,"{0}_boundary_border_within_positive_candidate_border_route_buffer_along_candidate_border_route".format(boundary))
        arcpy.LocateFeaturesAlongRoutes_lr(boundary_border_within_positive_candidate_border_route_buffer_splitted_by_own_endpoints,candidate_border_route_positive_within_offset,"SEGMENT_ID_POSITIVE_CANDIDATES",offset,\
                                           boundary_border_within_positive_candidate_border_route_buffer_along_candidate_border_route,"{0} {1} {2} {3}".format("RID","LINE","FMEAS","TMEAS"))

        # get the left/right boundary topology of the boundary segments located along the positive candidate routes within the boundary offset
        boundary_border_within_positive_candidate_border_route_buffer_with_polygon_topology_allcases= os.path.join(workspace,"{0}_boundary_border_within_positive_candidate_border_route_buffer_with_{1}_topology_allcases".format(boundary,boundary))
        arcpy.Identity_analysis(boundary_border_within_positive_candidate_border_route_buffer_splitted_by_own_endpoints, boundary, boundary_border_within_positive_candidate_border_route_buffer_with_polygon_topology_allcases,"ALL","","KEEP_RELATIONSHIPS")

        boundary_border_within_positive_candidate_border_route_buffer_with_polygon_topology_allcases_lyr = "in_memory\\{0}_boundary_border_within_positive_candidate_border_route_buffer_with_{1}_topology_allcases_lyr".format(boundary,boundary)
        arcpy.MakeFeatureLayer_management(boundary_border_within_positive_candidate_border_route_buffer_with_polygon_topology_allcases, boundary_border_within_positive_candidate_border_route_buffer_with_polygon_topology_allcases_lyr)

        where_clause = "\"{0}\"<>0 AND \"{1}\"<>0".format("LEFT_{0}".format(boundary),"RIGHT_{0}".format(boundary))
        arcpy.SelectLayerByAttribute_management(boundary_border_within_positive_candidate_border_route_buffer_with_polygon_topology_allcases_lyr, "NEW_SELECTION", where_clause)
        boundary_border_within_positive_candidate_border_route_buffer_with_polygon_topology = os.path.join(workspace,"{0}_boundary_border_within_positive_candidate_border_route_buffer_with_{1}_topology".format(boundary,boundary))
        arcpy.CopyFeatures_management(boundary_border_within_positive_candidate_border_route_buffer_with_polygon_topology_allcases_lyr,boundary_border_within_positive_candidate_border_route_buffer_with_polygon_topology)

        arcpy.JoinField_management(boundary_border_within_positive_candidate_border_route_buffer_along_candidate_border_route,"SEGMENT_ID_BOUNDARY",\
                                   boundary_border_within_positive_candidate_border_route_buffer_with_polygon_topology,"SEGMENT_ID_BOUNDARY",["LEFT_{0}".format(boundary_id_field),"RIGHT_{0}".format(boundary_id_field)])

        arcpy.JoinField_management(candidate_border_route_positive_within_offset,"SEGMENT_ID_POSITIVE_CANDIDATES",\
                                   boundary_border_within_positive_candidate_border_route_buffer_along_candidate_border_route,"RID",["SEGMENT_ID_BOUNDARY","LEFT_{0}".format(boundary_id_field),"RIGHT_{0}".format(boundary_id_field)])

        candidate_border_route_positive_within_offset_lyr = "in_memory\\candidate_{0}_border_route_positive_within_offset_lyr".format(boundary)
        arcpy.MakeFeatureLayer_management(candidate_border_route_positive_within_offset, candidate_border_route_positive_within_offset_lyr)
        where_clause = "\"{0}\"IS NOT NULL AND \"{1}\"IS NOT NULL".format("LEFT_{0}".format(boundary_id_field),"RIGHT_{0}".format(boundary_id_field))
        arcpy.SelectLayerByAttribute_management(candidate_border_route_positive_within_offset_lyr, "NEW_SELECTION", where_clause)
        candidate_border_route_positive_within_offset_with_polygon_topology = os.path.join(workspace,"candidate_{0}_border_route_positive_within_offset_with_{1}_topology".format(boundary,boundary))
        arcpy.CopyFeatures_management(candidate_border_route_positive_within_offset_lyr,candidate_border_route_positive_within_offset_with_polygon_topology)

        # get the left/right boundary topology of the candidate border routes outside the boundary offset
        candidate_border_route_positive_outof_offset_with_polygon_topology_allcases= os.path.join(workspace,"candidate_{0}_border_route_positive_outof_offset_with_{1}_topology_allcases".format(boundary,boundary))
        arcpy.Identity_analysis(candidate_border_route_positive_outof_offset, boundary, candidate_border_route_positive_outof_offset_with_polygon_topology_allcases,"ALL","","KEEP_RELATIONSHIPS")

        candidate_border_route_positive_outof_offset_with_polygon_topology_allcases_lyr = "in_memory\\candidate_{0}_border_route_positive_outof_offset_with_polygon_topology_allcases_lyr".format(boundary)
        arcpy.MakeFeatureLayer_management(candidate_border_route_positive_outof_offset_with_polygon_topology_allcases, candidate_border_route_positive_outof_offset_with_polygon_topology_allcases_lyr)
        where_clause = "\"{0}\"<>0 AND \"{1}\"<>0".format("LEFT_{0}".format(boundary),"RIGHT_{0}".format(boundary))
        arcpy.SelectLayerByAttribute_management(candidate_border_route_positive_outof_offset_with_polygon_topology_allcases_lyr, "NEW_SELECTION", where_clause)
        candidate_border_route_positive_outof_offset_with_polygon_topology = os.path.join(workspace,"candidate_{0}_border_route_positive_outof_offset_with_{1}_topology".format(boundary,boundary))
        arcpy.CopyFeatures_management(candidate_border_route_positive_outof_offset_with_polygon_topology_allcases_lyr,candidate_border_route_positive_outof_offset_with_polygon_topology)

        # merge
        candidate_border_route_positive_with_polygon_topology = "candidate_{0}_border_route_positive_with_{1}_topology".format(boundary,boundary)
        arcpy.FeatureClassToFeatureClass_conversion(candidate_border_route_positive_outof_offset_with_polygon_topology,workspace,candidate_border_route_positive_with_polygon_topology)
        arcpy.Append_management([candidate_border_route_positive_within_offset_with_polygon_topology],candidate_border_route_positive_with_polygon_topology,"NO_TEST")
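        # Note (not in the original): NO_TEST lets Append run without matching schemas;
        # input fields that do not match the target feature class are simply not transferred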

        ################################################################################################################


        ################################################################################################################
        arcpy.AddMessage("Populate route_border_rule_table...")

        # calculate from measure and to measure of candidate border route
        # arcpy.AddMessage("Calculating from measure and to measure of candidate border routes...")
        arcpy.AddGeometryAttributes_management(candidate_border_route_positive_with_polygon_topology, "LINE_START_MID_END")

        # get candidate border route segment geometry
        arcpy.AddField_management(candidate_border_route_positive_with_polygon_topology,"SEGMENT_GEOMETRY","TEXT","","",100)
        arcpy.CalculateField_management(candidate_border_route_positive_with_polygon_topology,"SEGMENT_GEOMETRY","!shape.type!","PYTHON")

        # sort candidate border route segments by route id and from-measure
        # arcpy.AddMessage("Sorting candidate border route segments by route id and from measure...")
        candidate_border_route_positive_with_polygon_topology_sorted = os.path.join(workspace,"candidate_{0}_border_route_positive_with_polygon_topology_sorted".format(boundary))
        arcpy.Sort_management(candidate_border_route_positive_with_polygon_topology,candidate_border_route_positive_with_polygon_topology_sorted,[[route_id_field,"ASCENDING"],["START_M","ASCENDING"]])

        # (re)create route_border_rule_table
        if arcpy.Exists(route_border_rule_table):
            arcpy.Delete_management(route_border_rule_table)
        create_route_border_rule_table_schema(workspace,route_border_rule_table)

        # populate route_border_rule_table
        iCur = arcpy.da.InsertCursor(route_border_rule_table,["ROUTE_ID","ROUTE_START_MEASURE","ROUTE_END_MEASURE","BOUNDARY_LEFT_ID",\
                                                              "BOUNDARY_RIGHT_ID","SEGMENT_GEOMETRY","EFFECTIVE_FROM_DT","EFFECTIVE_TO_DT"])
        with arcpy.da.SearchCursor(candidate_border_route_positive_with_polygon_topology_sorted,[route_id_field,"START_M","END_M","LEFT_{0}".format(boundary_id_field),\
                                                                              "RIGHT_{0}".format(boundary_id_field),"SEGMENT_GEOMETRY","START_DATE","END_DATE"]) as sCur:
            for row in sCur:
                iCur.insertRow(row)

        del sCur
        del iCur

        arcpy.CalculateField_management(route_border_rule_table, "BRP_PROCESS_DT", "'{0}'".format(date_string), "PYTHON")
        ################################################################################################################

        arcpy.AddMessage("done!")

        return route_border_rule_table
    except Exception:
        # arcpy.AddMessage(traceback.format_exc())
        # abort with the full traceback; sys.exit() raises SystemExit, so nothing runs after it
        sys.exit(traceback.format_exc())
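

# --- hedged sketch, not part of the original example ---
# generate_route_border_rule_table() calls a calculate_angle() helper that is not
# shown in this snippet. A minimal implementation consistent with how it is used
# (bearing from true north in degrees, negative sentinel for a zero-length segment)
# might look like this; the -1 sentinel is an assumption.
import math

def calculate_angle(x_first, y_first, x_last, y_last):
    dx = x_last - x_first
    dy = y_last - y_first
    if dx == 0 and dy == 0:
        return -1  # degenerate segment: no defined direction
    # atan2(dx, dy) measures the angle clockwise from north; normalise to [0, 360)
    return math.degrees(math.atan2(dx, dy)) % 360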