Example #1
def GetStartingLine(glfID, rhID):
    with da.SearchCursor(pointFC, ['SHAPE@', 'NodeID', 'NodeType'], 'NodeType = {0} OR NodeType = {1}'.format(glfID, rhID)) as cursorOrigin:
        for pointOrigin in cursorOrigin:
            # If this is a boiler house point, iterate over the supply (outflow) pipes
            if pointOrigin[2] == glfID:
                hltype = 1
                with da.UpdateCursor(LineFC, ['SHAPE@', 'LineID', 'S_NodeID', 'E_NodeID'], 'HLType = {0}'.format(hltype)) as cursorL:
                    for lineRL in cursorL:
                        if lineRL[2] is None and lineRL[3] is None:
                            if lineRL[0].touches(pointOrigin[0]):
                                # start node ID
                                lineRL[2] = pointOrigin[1]
                                # end node ID
                                with da.SearchCursor(pointFC, ['SHAPE@', 'NodeID'], 'NodeType <> {0}'.format(pointOrigin[2])) as cursorP:
                                    for point in cursorP:
                                        if lineRL[0].touches(point[0]):
                                            lineRL[3] = point[1]
                                            cursorL.updateRow(lineRL)
                                            print(u'Supply pipe <{0}>: start <{1}>, end <{2}>.'.format(lineRL[1], lineRL[2], lineRL[3]))
            # If this is a service connection (household) point, iterate over the return pipes
            elif pointOrigin[2] == rhID:
                hltype = 0
                with da.UpdateCursor(LineFC, ['SHAPE@', 'LineID', 'S_NodeID', 'E_NodeID'], 'HLType = {0}'.format(hltype)) as cursorL:
                    for lineRL in cursorL:
                        if lineRL[2] is None and lineRL[3] is None:
                            if lineRL[0].touches(pointOrigin[0]):
                                # end node ID (the return pipe ends at this origin point)
                                lineRL[3] = pointOrigin[1]
                                # start node ID
                                with da.SearchCursor(pointFC, ['SHAPE@', 'NodeID'], 'NodeType <> {0}'.format(pointOrigin[2])) as cursorP:
                                    for point in cursorP:
                                        if lineRL[0].touches(point[0]):
                                            lineRL[2] = point[1]
                                            cursorL.updateRow(lineRL)
                                            print(u'Return pipe <{0}>: start <{1}>, end <{2}>.'.format(lineRL[1], lineRL[2], lineRL[3]))
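pointFC and LineFC are assumed to be module-level feature class paths, and glfID/rhID are the NodeType codes for boiler-house and service-connection points. A hypothetical call (the second code is illustrative only; Example #25 below uses 8 for boiler houses) might be:

GetStartingLine(8, 3)  # 8 = boiler house NodeType, 3 = hypothetical service-connection NodeType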
Example #2
def Incrementer(layer):
    # Build a dict here and use it to track how many times a particular
    # number has been seen. If dict[routeNumKey] does not exist, assign 000
    # to the dictionary and the UniqueNum; if it does exist, increment by 1
    # and assign that value to the dictionary and the UniqueNum.

    # this function calculates the unique number for multiple route ID numbers
    fields = ['UniqueNum1', 'RouteNum1']
    previousRouteNum = '0000'
    rows = da.UpdateCursor(layer, fields)
    counter = 0
    for row in rows:
        if previousRouteNum == row[1]:
            countstring = str(counter).zfill(3)
            row[0] = countstring
            rows.updateRow(row)
            counter += 1
            previousRouteNum = row[1]
        else:
            counter = 0
            countstring = str(counter).zfill(3)
            row[0] = countstring
            rows.updateRow(row)
            counter += 1
            previousRouteNum = row[1]
    del rows
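The header comment above describes a dictionary-based counter, but the body only compares each row against the previous one. A minimal sketch of the dictionary approach it describes (same hypothetical fields, arcpy.da imported as da like the other examples) might look like:

def IncrementerDict(layer):
    # Track how many times each route number has been seen and write the
    # zero-padded count to UniqueNum1.
    seen = {}
    with da.UpdateCursor(layer, ['UniqueNum1', 'RouteNum1']) as rows:
        for row in rows:
            route_key = row[1]
            if route_key not in seen:
                seen[route_key] = 0       # first occurrence gets 000
            else:
                seen[route_key] += 1      # later occurrences increment
            row[0] = str(seen[route_key]).zfill(3)
            rows.updateRow(row)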
def assignRouteNumbersToRail(fc, name_field, num_field):
    """Some fc's only have routes as names, they need to have the route number and
	this function provides that based on the route name"""

    rail_num_dict = {
        'ns': 193,
        'cl': 194,
        'ns/cl': '193,194',
        'r': 90,
        'b': 100,
        'y': 190,
        'g': 200,
        'o': 290
    }

    with da.UpdateCursor(fc, [name_field, num_field]) as cursor:
        for name, num in cursor:
            l_name = name.lower()
            if l_name in rail_num_dict:
                num = str(rail_num_dict[l_name])
            else:
                # name not found as a whole; treat each character as a one-letter route code
                num_list = []
                for l in l_name:
                    num_list.append(rail_num_dict[l])

                num = ','.join([str(r) for r in num_list])

            cursor.updateRow((name, num))
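A hypothetical call, assuming a rail feature class with name and number fields (all names illustrative only):

assignRouteNumbersToRail('rail_routes', 'route_name', 'route_number')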
Example #4
def UpdateLineAttribute():
    # lineRL2: lines whose start and end node IDs are both already assigned (non-null)
    fields = ['SHAPE@', 'LineID', 'S_NodeID', 'E_NodeID', 'HLType']
    with da.SearchCursor(LineFC, fields, 'S_NodeID IS NOT NULL AND E_NodeID IS NOT NULL') as cursorL2:
        for lineRL2 in cursorL2:
            # lineRL1: lines of the same HLType whose start and end node IDs are both still null
            with da.UpdateCursor(LineFC, fields, 'HLType = {0} AND S_NodeID IS NULL AND E_NodeID IS NULL'.format(lineRL2[4])) as cursorL1:
                for lineRL1 in cursorL1:
                    # If the two lines touch each other
                    if lineRL1[0].touches(lineRL2[0]):
                        if lineRL1[4] == 1:         # supply pipe: propagate the start node
                            lineRL1[2] = lineRL2[3]
                        elif lineRL1[4] == 0:       # return pipe: propagate the end node
                            lineRL1[3] = lineRL2[3]
                        with da.SearchCursor(pointFC, ['SHAPE@', 'NodeID']) as cursorP2:
                            # Iterate over all pipe nodes
                            for point2 in cursorP2:
                                # If the node touches the unassigned line and is not its start node, use it as the other endpoint
                                if point2[0].touches(lineRL1[0]) and lineRL1[2] != point2[1]:
                                    if lineRL1[4] == 1:        # supply pipe
                                        lineRL1[3] = point2[1]
                                    elif lineRL1[4] == 0:      # return pipe
                                        lineRL1[2] = point2[1]
                                    cursorL1.updateRow(lineRL1)
                                    print(u'## <{0}>: start <{1}>, end <{2}>.'.format(lineRL1[1], lineRL1[2], lineRL1[3]))
            del lineRL1
            del cursorL1
Example #5
    def addFields2FC(self, candidateFields, fieldOrder=[]):

        #### Create/Verify Result Field Order ####
        fieldKeys = sorted(candidateFields.keys())
        if len(fieldOrder) == len(fieldKeys):
            fKeySet = set(fieldKeys)
            fieldOrderSet = set(fieldOrder)
            if fieldOrderSet == fKeySet:
                fieldKeys = fieldOrder

            del fKeySet, fieldOrderSet

        #### Add Empty Output Analysis Fields ####
        outputFieldNames = [self.masterField]
        for fieldInd, fieldName in enumerate(fieldKeys):
            field = candidateFields[fieldName]
            field.copy2FC(self.inputFC)
            outputFieldNames.append(fieldName)

            #### Replace NaNs for Shapefiles ####
            if self.shapeFileBool:
                if field.type != "TEXT":
                    isNaN = NUM.isnan(field.data)
                    if NUM.any(isNaN):
                        field.data[isNaN] = UTILS.shpFileNull[field.type]

        #### Populate Output Feature Class with Values ####
        ARCPY.SetProgressor("step", ARCPY.GetIDMessage(84003), 0, self.numObs,
                            1)
        outRows = DA.UpdateCursor(self.inputFC, outputFieldNames)

        for row in outRows:
            masterID = row[0]
            if masterID in self.master2Order:
                order = self.master2Order[masterID]

                #### Create Output Row from Input ####
                resultValues = [masterID]

                #### Add Result Values ####
                for fieldName in fieldKeys:
                    field = candidateFields[fieldName]
                    fieldValue = field.data.item(order)
                    resultValues.append(fieldValue)

                #### Insert Values into Output ####
                outRows.updateRow(resultValues)

            else:
                #### Bad Record, Input: Do Not Delete Record ####
                pass

            ARCPY.SetProgressorPosition()

        #### Clean Up ####
        del outRows
Example #6
def update_with_results(features, fields, results):
    with da.UpdateCursor(features, ['OID@'] + list(fields)) as cursor:
        for row in cursor:
            if row[0] in results:
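                # xrange only exists in Python 2; fall back to range under Python 3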
                try:
                    for i in xrange(0, len(fields)):
                        row[i + 1] = results[row[0]][i]
                except NameError:
                    for i in range(0, len(fields)):
                        row[i + 1] = results[row[0]][i]

            cursor.updateRow(row)
Example #7
File: Update_IDs.py  Project: falayet/rFHL
def CalcField(table, temp_field, original_field):
    # Check if table is empty
    result = arcpy.GetCount_management(table)
    count = int(result.getOutput(0))
    #
    if count > 0:
        fields = [temp_field, original_field]
        arcpy.AddMessage("-Updating " + temp_field + " with " +
                         original_field + " in " + table)
        with da.UpdateCursor(table, fields) as cursor:
            for row in cursor:
                row[0] = row[1]
                cursor.updateRow(row)
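A hypothetical call that copies values from a temporary ID field back into the production field (table and field names illustrative only):

CalcField('S_Fld_Haz_Ar', 'TEMP_ID', 'FLD_AR_ID')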
Example #8
File: Update_IDs.py  Project: falayet/rFHL
def AssignAutoIncrementingID(table, field):
    result = arcpy.GetCount_management(table)
    count = int(result.getOutput(0))
    if count == 0:
        arcpy.AddMessage("-" + table + " is empty")
    else:
        # Populate target field with incrementing ID
        with da.UpdateCursor(table, field) as cursor:
            arcpy.AddMessage("-Assigning ID to " + field + " in " + table +
                             "(" + str(count) + " rows)")
            for row in cursor:
                row[0] = autoIncrement()
                cursor.updateRow(row)
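autoIncrement() is defined elsewhere in Update_IDs.py; a minimal stand-in, assuming it simply hands out sequential integers, could be:

import itertools

_next_id = itertools.count(1)

def autoIncrement():
    # hypothetical stand-in: returns 1, 2, 3, ... on successive calls
    return next(_next_id)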
Example #9
def PointGEOM(fc, tbl, workspace, layer_name, fields):
    #Updates the Geometry point location based on the XY attributes in the GIS table, run this after the XY attributes have been updated
    try:
        MakeFeatureLayer_management(fc, layer_name)
        #the tolerance is how close a lat/long field value must match the coordinate position
        Tolerance = 0.000001
        #start the edit operation using the DA cursor
        edit = da.Editor(workspace)  # @UndefinedVariable
        edit.startEditing()
        edit.startOperation()
        with da.UpdateCursor(fc, fields) as ucursor:  # @UndefinedVariable
            for row in ucursor:
                #rows 0 and 1 are the lat long fields in the table
                point = Point(row[0], row[1])
                #row 2 is the geometry lat long tuple, and needs to be split in to lat/long parts
                rowx, rowy = (row[2])
                rowvalues = (row[0], row[1], point, datetime.datetime.now())
                #compare the lat long table values to the point location
                if (type(rowx) == float):
                    intolX = abs(row[0] - rowx)
                    intolY = abs(row[1] - rowy)
                    if intolX < Tolerance and intolY < Tolerance:
                        pass
                    else:
                        #if the shape needs to be adjusted, this will update the coordinate position from the field info
                        point = Point(row[0], row[1])
                        rowvalues = (row[0], row[1], point,
                                     datetime.datetime.now())
                        print "these rows are outside the position tolerance:"
                        print(rowvalues)
                        ucursor.updateRow(rowvalues)
                    #print (rowvalues)
                else:
                    point = Point(row[0], row[1])
                    rowvalues = (row[0], row[1], point,
                                 datetime.datetime.now())
                    print "these rows need to be calculated:"
                    print(rowvalues)
                    ucursor.updateRow(rowvalues)
        edit.stopOperation()
        edit.stopEditing(True)
        del layer_name, fc, fields, workspace
        print "point geometry updated"
    except ExecuteError:
        print(GetMessages(2))
        endingTime = datetime.datetime.now()
        ScriptStatusLogging('POINT_UPDATE_PROD.py', 'CIIMS.Static_Crossings',
                            scriptFailure, startingTime, endingTime,
                            GetMessages(2))
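The inline comments imply a fixed field order for the fields argument: X/longitude, Y/latitude, the geometry tuple, and a last-edit timestamp. A hypothetical call consistent with that (field names illustrative only) might be:

PointGEOM(fc, tbl, workspace, 'crossings_lyr', ['LONGITUDE', 'LATITUDE', 'SHAPE@XY', 'LOADDATE'])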
Example #10
def attribute_tile_lidar(in_feature_class,
                         in_tile_name,
                         in_df,
                         in_name,
                         xlsx_row_name=xlsx_row_name):
    with da.UpdateCursor(in_feature_class,
                         [in_tile_name, in_name]) as cursor:
        for fc_r in cursor:
            for df_i, df_r in in_df.iterrows():
                url = df_r[xlsx_row_name]
                n = split(url)[1]
                t_name = fc_r[0]
                if n == t_name:
                    fc_r[1] = url
            cursor.updateRow(fc_r)
Example #11
def attribute_tile(in_feature_class,
                   in_tile_name,
                   in_df,
                   in_name,
                   xlsx_row_name=xlsx_row_name):
    with da.UpdateCursor(in_feature_class,
                         [in_tile_name, in_name]) as cursor:
        for fc_r in cursor:
            for df_i, df_r in in_df.iterrows():
                url = df_r[xlsx_row_name]
                n = Path(url).stem
                t_name = fc_r[0]
                t_n = Path(t_name).stem
                if n.startswith(in_name) and t_n in n:
                    fc_r[1] = url
            cursor.updateRow(fc_r)
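Both attribute functions above assume that in_df is a pandas DataFrame built from a spreadsheet, that xlsx_row_name is a module-level column name holding the tile URLs (it is used as a default argument, so it must already exist when the functions are defined), and that split is os.path.split. A hypothetical setup, with every name illustrative:

import pandas as pd

xlsx_row_name = 'download_url'            # hypothetical column holding tile URLs
tile_index = pd.read_excel('tiles.xlsx')  # hypothetical spreadsheet of tiles
attribute_tile('lidar_tiles', 'TILE_NAME', tile_index, 'dem', xlsx_row_name)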
def generateMatchCode():
    """Generate a code for the collapse dual carriageway tool that will indicate if two
	segments are eligible to be snapped to each other"""

    service_dict = {'frequent': 1, 'standard': 2, 'rush-hour': 3}

    # create a copy of service_level_routes.shp so that the original is not modified
    management.CopyFeatures(serv_level_routes_src, serv_level_routes)

    merge_field, f_type = 'merge_id', 'LONG'
    management.AddField(serv_level_routes, merge_field, f_type)

    u_fields = ['serv_level', 'route_type', merge_field]
    with da.UpdateCursor(serv_level_routes, u_fields) as u_cursor:
        for service, r_type, merge in u_cursor:
            # match field must be of type int
            merge = int(str(service_dict[service]) + str(int(r_type)))
            u_cursor.updateRow((service, r_type, merge))
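For example, a 'frequent' service (code 1) on route_type 12 yields merge = int('1' + '12') = 112; concatenating the digits keeps the match field a single integer, as the collapse tool requires.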
Example #13
def generateMatchCode():
    """Generate a code for the collapse dual carriageway tool that will indicate if two
	segments are eligible to be snapped to each other"""

    service_dict = {'frequent': 1, 'standard': 2, 'rush-hour': 3}

    # create a copy of distinct_routes.shp so that the original is not modified
    management.CopyFeatures(distinct_routes_src, distinct_routes)

    merge_field, f_type = 'merge_id', 'LONG'
    management.AddField(distinct_routes, merge_field, f_type)

    u_fields = ['route_id', 'serv_level', merge_field]
    with da.UpdateCursor(distinct_routes, u_fields) as u_cursor:
        for route, service, merge in u_cursor:
            # create a unique id based on frequency and route that is an integer
            merge = int(str(int(route)) + '000' + str(service_dict[service]))
            u_cursor.updateRow((route, service, merge))
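For example, route 9 at the 'standard' service level (code 2) yields merge = int('9' + '000' + '2') = 90002, so the route and the service level survive as separate digit groups inside one integer.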
Example #14
def Incrementer(layer):
    #this function calculates the unique number for multiple route ID numbers
    fields = ['UniqueNum1', 'RouteNum1']
    string0 = '0000'
    rows = da.UpdateCursor(layer, fields)
    counter = 0
    for row in rows:
        if string0 == row[1]:
            countstring = str(counter).zfill(2)
            row[0] = countstring
            rows.updateRow(row)
            counter += 1
        else:
            string0 = str(int(string0) + 1).zfill(4)
            countstring = str(counter).zfill(2)
            row[0] = countstring
            rows.updateRow(row)
            counter += 1
    del rows
Example #15
def strip(in_table, in_field, in_type):
    #  Check if table is empty
    result = arcpy.GetCount_management(in_table)
    count = int(result.getOutput(0))

    # If table has no rows, alert user
    if count == 0:
        arcpy.AddMessage("\n" + table + " has no rows...")
    # If table has data, proceed with stripping DFIRM ID
    else:
        arcpy.AddMessage("\n" + "Stripping DFIRM ID off " + in_field + " in " +
                         in_table + "...")
        fields = [in_field, "OBJECTID"]
        with da.UpdateCursor(in_table, fields) as cursor:
            for row in cursor:
                #  if current cell's value is Null, update with "" or "NP" depending on the field type
                if row[0] is None:

                    if in_type == "R":
                        row[0] = "NP"
                        arcpy.AddWarning("- " + in_table + ", " + in_field +
                                         " OID: " + str(row[1]) +
                                         " is Null. Updated with NP")
                    elif in_type == "A":
                        row[0] = ""
                        arcpy.AddWarning("- " + in_table + ", " + in_field +
                                         " OID: " + str(row[1]) +
                                         " is Null. Updated with " + '""')

                else:
                    # if current cell's value is not Null, find the index for underscore
                    underscore_index = row[0].find("_")
                    strip_index = underscore_index + 1

                    # if the parsed value does not contain underscore, alert user and do nothing
                    if underscore_index == -1:
                        pass
                        # arcpy.AddMessage ("- "+in_table+", "+in_field+" OID: "+str(row[1])+" has no underscore")
                    # if the parsed value contains an underscore, strip everything up to and including it
                    else:
                        row[0] = row[0][strip_index:]
                #  save updates
                cursor.updateRow(row)
Example #16
def PointGEOM(fc, tbl, workspace, layer_name, fields):
    #Updates the Geometry point location based on the XY attributes in the GIS table, run this after the XY attributes have been updated
    try:
        MakeFeatureLayer_management(fc, layer_name)
        Tolerance = 0.0000001
        #start the edit operation
        edit = da.Editor(workspace)
        edit.startEditing()
        edit.startOperation()
        with da.UpdateCursor(fc, fields) as ucursor:
            for row in ucursor:
                point = Point(row[0], row[1])
                rowx, rowy = (row[2])
                rowvalues = (row[0], row[1], point, datetime.datetime.now())
                if (type(rowx) == float):
                    intolX = abs(row[0] - rowx)
                    intolY = abs(row[1] - rowy)
                    if intolX < Tolerance and intolY < Tolerance:
                        pass
                    else:
                        point = Point(row[0], row[1])
                        rowvalues = (row[0], row[1], point,
                                     datetime.datetime.now())
                        print(rowvalues)
                        ucursor.updateRow(rowvalues)
                    #print (rowvalues)
                else:
                    point = Point(row[0], row[1])
                    rowvalues = (row[0], row[1], point,
                                 datetime.datetime.now())
                    print "these rows are outside the position tolerance:"
                    print(rowvalues)
                    ucursor.updateRow(rowvalues)
        edit.stopOperation()
        edit.stopEditing(True)
        del layer_name, fc, fields, workspace
        print "point geometry updated"
    except ExecuteError:
        print(GetMessages(2))
Example #17
    def get_temporal_accuracy(self):
        """assigns the temporal accuracy"""
        try:
            #score the features
            arcpy.AddMessage('Scoring the features')
            with da.UpdateCursor(self.temporal_accuracy_features,
                                 self.year_list) as cursor:
                for row in cursor:
                    if row[self.num_years] is None or row[self.num_years] == 0:
                        row[self.num_years+1] = None
                        row[self.num_years+2] = None
                    elif None in row[0:self.num_years-2]:
                        row[self.num_years+1] = None
                        row[self.num_years+2] = None
                    else:
                        if row[self.num_years] >= self.years[self.num_years-1]:
                            row[self.num_years+1] = 6
                            row[self.num_years+2] = 0
                        # originally 2012, should be indexed at 12?
                        else:
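                            # xrange fallback: Python 2 has xrange, Python 3 only has range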
                            try:
                                for x in xrange(0,self.num_years):
                                    if (row[self.num_years] >= self.years[x] and row[self.num_years] < self.years[x+1]):
                                        row[self.num_years+1], row[self.num_years+2] = self.get_score(row[x:self.num_years])
                            except NameError:
                                for x in range(0,self.num_years-2):
                                    if (row[self.num_years] >= self.years[x] and row[self.num_years] < self.years[x+1]):
                                        row[self.num_years+1], row[self.num_years+2] = self.get_score(row[x:self.num_years])

                    cursor.updateRow(row)
        except:
            line, filename, synerror = trace()
            raise FunctionError({
                "function": "get_temporal_accuracy",
                "line": line,
                "filename": __file__,
                "synerror": synerror,
                "arc" : str(arcpy.GetMessages(2))
            })
def commonline(linefc, pointfc, linetype):
    fields = ['SHAPE@', 'LineID', 'S_NodeID', 'E_NodeID']
    with da.SearchCursor(
            linefc, fields,
            'HLType = {0} AND S_NodeID IS NOT NULL AND E_NodeID IS NOT NULL'.
            format(linetype)) as cursorL2:
        for lineRL2 in cursorL2:
            with da.UpdateCursor(
                    linefc, fields,
                    'HLType = {0} AND S_NodeID IS NULL AND E_NodeID IS NULL'.
                    format(linetype)) as cursorL1:
                for lineRL1 in cursorL1:
                    if lineRL1[0].touches(lineRL2[0]):
                        with da.SearchCursor(pointfc,
                                             ['SHAPE@', 'NodeID']) as cursorP:
                            for point in cursorP:
                                if point[0].touches(
                                        lineRL1[0]) and point[0].touches(
                                            lineRL2[0]):
                                    lineRL1[2] = point[1]
                                    with da.SearchCursor(
                                            pointfc,
                                        ['SHAPE@', 'NodeID']) as cursorP2:
                                        for point2 in cursorP2:
                                            if point2[0].touches(
                                                    lineRL1[0]
                                            ) and lineRL1[2] != point2[1]:
                                                lineRL1[3] = point2[1]
                                                cursorL1.updateRow(lineRL1)
                                                print(
                                                    u'  Supply pipe <{0}>: start <{1}>, end <{2}>.'
                                                    .format(
                                                        lineRL1[1], lineRL1[2],
                                                        lineRL1[3]))
                                    del cursorP2
                        del cursorP
            del cursorL1
    del cursorL2
Example #19
def simplifyParks():
    """Simplify parks so that they can effectively be displayed at a scale
	of 1:100,000 on the principal system map"""

    # limit parks to sites that are at least 100 acres and that are named
    parks_lyr = 'parks_layer'
    park_type = 'Park and/or Natural Area'
    park_size = 100  # acres
    where_clause = """"TYPE" = '{0}' AND "ACREAGE" > {1} AND "SITENAME" <> ' '"""
    where_populated = where_clause.format(park_type, park_size)
    management.MakeFeatureLayer(orca_sites, parks_lyr, where_populated)

    # use union tool to get rid of any holes in park features
    gaps_setting = 'NO_GAPS'
    parks_union = os.path.join('in_memory', 'parks_union')
    analysis.Union(parks_lyr, parks_union, gaps=gaps_setting)

    # the holes have been filled in by the union tool, but the pieces still need to be dissolved back into single park features
    parks_dissolve = os.path.join('in_memory', 'parks_dissolve')
    management.Dissolve(parks_union, parks_dissolve)

    # split multipart features to single part
    single_part_parks = os.path.join('in_memory', 'single_part_parks')
    management.MultipartToSinglepart(parks_dissolve, single_part_parks)

    # delete any park fragments
    parks_fields = ['OID@', 'SHAPE@AREA']
    with da.UpdateCursor(single_part_parks, parks_fields) as cursor:
        for oid, area in cursor:
            if area < 1000000:  # square feet
                cursor.deleteRow()

    # simplify the parks by smoothing out their edges
    algorithm = 'PAEK'
    tolerance = 5000  # feet
    endpoint_option = 'NO_FIXED'
    cartography.SmoothPolygon(single_part_parks, simplified_parks, algorithm,
                              tolerance, endpoint_option)
def endingline(linefc, pointfc, nodetype, linetype):
    with da.SearchCursor(pointfc, ['SHAPE@', 'NodeID'],
                         'NodeType = {0}'.format(nodetype)) as cursorG:
        for pointGLF in cursorG:
            with da.UpdateCursor(
                    linefc, ['SHAPE@', 'LineID', 'S_NodeID', 'E_NodeID'],
                    'HLType = {0} AND S_NodeID IS NULL AND E_NodeID IS NULL'.
                    format(linetype)) as cursorL:
                for lineRL in cursorL:
                    if lineRL[0].touches(pointGLF[0]):
                        lineRL[3] = pointGLF[1]
                        with da.SearchCursor(
                                pointfc, ['SHAPE@', 'NodeID'],
                                'NodeType <> {0}'.format(nodetype)) as cursorP:
                            for point in cursorP:
                                if lineRL[0].touches(point[0]):
                                    lineRL[2] = point[1]
                                    cursorL.updateRow(lineRL)
                                    print(
                                        u'  Return pipe <{0}>: start <{1}>, end <{2}>.'.format(
                                            lineRL[1], lineRL[2], lineRL[3]))
                        del cursorP
            del cursorL
    del cursorG
Example #21
def data_comparison(in_grid,
                    in_fcs,
                    in_old_gdb,
                    in_new_gdb,
                    out_grid,
                    geom_type="POINT",
                    scratchGDB=env.scratchGDB):
    """
    Generates rankings based on a given grid and data type

    Inputs:
     in_grid: path to the area of interest grid
     in_fcs: list of feature class names
     in_old_gdb: old FGDB path for comparison
     in_new_gdb: new FGDB path for comparison
     out_grid: path of the output GDB feature class
     geom_type: string value of POINT, POLYLINE, or POLYGON
    """
    try:
        merged_points_n = os.path.join("in_memory",
                                       "merged_pts_n")  #scratchGDB
        merged_points_o = os.path.join("in_memory",
                                       "merged_pts_o")  #scratchGDB
        temp_out_grid = os.path.join(scratchGDB, "grid")
        old_stats = os.path.join(scratchGDB, "old_stats")
        new_stats = os.path.join(scratchGDB, "new_stats")
        # Copy Grid to Temp Folder
        temp_out_grid = arcpy.CopyFeatures_management(in_grid,
                                                      temp_out_grid)[0]
        # Merge all fcs into one fc
        merged_points_n, total_n_count = merge_fcs(in_fcs,
                                                   merged_points_n,
                                                   gdb=in_new_gdb)
        merged_points_o, total_o_count = merge_fcs(in_fcs,
                                                   merged_points_o,
                                                   gdb=in_old_gdb)
        # intersect the grid
        new_pts_int = arcpy.Intersect_analysis(
            in_features=[merged_points_n, temp_out_grid],
            out_feature_class=os.path.join(scratchGDB, "new_pts_int"),
            join_attributes="ONLY_FID")[0]
        old_pts_int = arcpy.Intersect_analysis(
            in_features=[merged_points_o, temp_out_grid],
            out_feature_class=os.path.join(scratchGDB, "old_pts_int"),
            join_attributes="ONLY_FID")[0]
        if geom_type.lower() == "point":
            stat_fields_new = "FID_grid COUNT"
            stat_fields_old = "FID_grid COUNT"
            method = ["POINT"]
        elif geom_type.lower() in ("polyline", 'polygon'):
            arcpy.AddField_management(old_pts_int, "OLD_LENGTH", "FLOAT")
            arcpy.CalculateField_management(
                in_table=old_pts_int,
                field="OLD_LENGTH",
                expression="!shape.length@kilometers!",
                expression_type="PYTHON_9.3",
                code_block="")
            arcpy.AddField_management(new_pts_int, "NEW_LENGTH", "FLOAT")
            arcpy.CalculateField_management(
                in_table=new_pts_int,
                field="NEW_LENGTH",
                expression="!shape.length@kilometers!",
                expression_type="PYTHON_9.3",
                code_block="")
            if geom_type.lower() == "polygon":
                arcpy.AddField_management(old_pts_int, "OLD_AREA", "FLOAT")
                #!shape.area@squarekilometers!
                arcpy.CalculateField_management(
                    in_table=old_pts_int,
                    field="OLD_AREA",
                    expression="!shape.area@squarekilometers!",
                    expression_type="PYTHON_9.3",
                    code_block="")
                arcpy.AddField_management(new_pts_int, "NEW_AREA", "FLOAT")
                arcpy.CalculateField_management(
                    in_table=new_pts_int,
                    field="NEW_AREA",
                    expression="!shape.area@squarekilometers!",
                    expression_type="PYTHON_9.3",
                    code_block="")
            if geom_type.lower() == "polygon":
                stat_fields_new = "FID_grid COUNT;NEW_LENGTH SUM;NEW_AREA SUM"
                stat_fields_old = "FID_grid COUNT;OLD_LENGTH SUM;OLD_AREA SUM"
                method = ['POINT', 'POLYLINE', 'POLYGON']
            else:
                stat_fields_new = "FID_grid COUNT;NEW_LENGTH SUM"
                stat_fields_old = "FID_grid COUNT;OLD_LENGTH SUM"
                method = ['POINT', 'POLYLINE']

        # get the counts
        old_stats = arcpy.Statistics_analysis(
            in_table=old_pts_int,
            out_table=old_stats,
            statistics_fields=stat_fields_old,
            case_field="FID_grid")[0]
        new_stats = arcpy.Statistics_analysis(
            in_table=new_pts_int,
            out_table=new_stats,
            statistics_fields=stat_fields_new,
            case_field="FID_grid")[0]
        # join the old stats to the new stats
        if geom_type.lower() == "polygon":
            arcpy.AlterField_management(new_stats,
                                        field="SUM_NEW_LENGTH",
                                        new_field_name="NEW_LENGTH")
            arcpy.AlterField_management(new_stats,
                                        field="SUM_NEW_AREA",
                                        new_field_name="NEW_AREA")
            out_fields = [
                'FID_grid', 'FREQUENCY', 'SUM_OLD_LENGTH', 'SUM_OLD_AREA'
            ]
            ndt = np.dtype([('FID_grid', '<i4'), ('OLD_FREQUENCY', '<i4'),
                            ('OLD_LENGTH', np.float64),
                            ('OLD_AREA', np.float64)])
            export_fields = [
                'FID_grid', 'FREQUENCY', 'OLD_FREQUENCY', 'OLD_LENGTH',
                'NEW_LENGTH', 'OLD_AREA', 'NEW_AREA', 'SCORE', 'RANKING'
            ]
        elif geom_type.lower() == "point":
            out_fields = ['FID_grid', 'FREQUENCY']
            ndt = np.dtype([('FID_grid', '<i4'), ('OLD_FREQUENCY', '<i4')])
            export_fields = [
                'FID_grid', 'FREQUENCY', 'OLD_FREQUENCY', 'SCORE', 'RANKING'
            ]
        elif geom_type.lower() == "polyline":
            arcpy.AlterField_management(new_stats,
                                        field="SUM_NEW_LENGTH",
                                        new_field_name="NEW_LENGTH")
            out_fields = ['FID_grid', 'FREQUENCY', 'SUM_OLD_LENGTH']
            ndt = np.dtype([('FID_grid', '<i4'), ('OLD_FREQUENCY', '<i4'),
                            ('OLD_LENGTH', np.float64)])
            export_fields = [
                'FID_grid', 'FREQUENCY', 'OLD_FREQUENCY', 'OLD_LENGTH',
                'NEW_LENGTH', 'SCORE', 'RANKING'
            ]
        old_array = da.TableToNumPyArray(in_table=old_stats,
                                         field_names=out_fields)
        old_array.dtype = ndt
        # Add SCORE and RANKING fields and remove unneeded fields
        da.ExtendTable(new_stats, "FID_grid", old_array, "FID_grid", False)
        array = np.array([],
                         np.dtype([('_id', np.int32), ('SCORE', np.float64),
                                   ('RANKING', np.int64)]))
        da.ExtendTable(new_stats,
                       arcpy.Describe(new_stats).OIDFieldName, array, "_id",
                       False)
        arcpy.DeleteField_management(new_stats, ['COUNT_FID_grid'])
        array = da.TableToNumPyArray(in_table=new_stats,
                                     field_names=export_fields,
                                     null_value=0)
        # Calculate the rankings
        tcsv, column_list = calculate_frequency_ranking(array=array,
                                                        methods=method)
        array = da.TableToNumPyArray(tcsv, column_list)
        da.ExtendTable(temp_out_grid,
                       arcpy.Describe(temp_out_grid).OIDFieldName, array,
                       "FID_grid")
        # Clean up NULL values
        if geom_type.lower() == "point":
            sql = """RANKING IS NULL"""
            fields = ['RANKING']
        elif geom_type.lower() == "polyline":
            sql = """RANKING IS NULL OR RANKING_LENGTH IS NULL"""
            fields = ['RANKING', 'RANKING_LENGTH']
        elif geom_type.lower() == "polygon":
            sql = """RANKING IS NULL OR RANKING_LENGTH IS NULL OR RANKING_AREA IS NULL"""
            fields = ['RANKING', 'RANKING_LENGTH', 'RANKING_AREA']
        with da.UpdateCursor(temp_out_grid, fields, where_clause=sql) as urows:
            for row in urows:
                if geom_type.lower() == "point":
                    row[0] = 0
                elif geom_type.lower() == "polyline":
                    if row[0] is None:
                        row[0] = 0
                    if row[1] is None:
                        row[1] = 0
                elif geom_type.lower() == "polygon":
                    if row[0] is None:
                        row[0] = 0
                    if row[1] is None:
                        row[1] = 0
                    if row[2] is None:
                        row[2] = 0
                urows.updateRow(row)
                del row
        del urows
        # return the output grid
        return arcpy.CopyFeatures_management(temp_out_grid, out_grid)[0]
    except:
        line, filename, synerror = trace()
        raise FunctionError({
            "function": "data_comparison",
            "line": line,
            "filename": filename,
            "synerror": synerror,
            "arc": str(arcpy.GetMessages(2))
        })
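A hypothetical invocation comparing two releases of the same feature classes over a fishnet grid (all paths and names illustrative only):

data_comparison(in_grid=r'C:\data\aoi.gdb\grid',
                in_fcs=['roads', 'buildings'],
                in_old_gdb=r'C:\data\release_2018.gdb',
                in_new_gdb=r'C:\data\release_2019.gdb',
                out_grid=r'C:\data\results.gdb\grid_ranked',
                geom_type='POLYLINE')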
med_blk_price = arcpy.Statistics_analysis(
    CSA70blk,
    os.path.join(work_path, "med_blk_price"),
    statistics_fields=[["salepr16", 'MEAN'], ["Join_Count", 'SUM'],
                       ["vacant16", 'MEAN']])
med_wht_price = arcpy.Statistics_analysis(
    CSA70wht,
    os.path.join(work_path, "med_wht_price"),
    statistics_fields=[["salepr16", 'MEAN'], ["Join_Count", 'SUM'],
                       ["vacant16", 'MEAN']])

#Add new field indicating the majority race to both tables
field = 'maj_rac'
arcpy.AddField_management(med_blk_price, field, 'TEXT')

with da.UpdateCursor(med_blk_price, field) as cursor:
    for row in cursor:
        row[0] = "Black"
        cursor.updateRow(row)
    del cursor

arcpy.AddField_management(med_wht_price, field, 'TEXT')

with da.UpdateCursor(med_wht_price, field) as cursor:
    for row in cursor:
        row[0] = "White"
        cursor.updateRow(row)

#Merge tables for comparison
comp_CSAs = arcpy.Merge_management(['med_blk_price', 'med_wht_price'],
                                   os.path.join(work_path, "Compare_majCSAs"))
import arcpy
import arcpy.da as da
# 'harta' below refers to the ArcMap mapping module; assuming the usual alias
import arcpy.mapping as harta

arcpy.AddField_management("judTab", "pondere", "SHORT")
lista_lungimi = [rand[0] for rand in da.SearchCursor("judTab", ("lungime"))]
sl = sum(lista_lungimi)
with da.UpdateCursor("judTab", ("lungime", "pondere")) as Calc_pondere:
    for rand in Calc_pondere:
        rand[1] = int(round(rand[0] / sl * 100))
        if rand[1] == 0:
            rand[1] = 1
        Calc_pondere.updateRow(rand)
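As a worked example, a county whose lungime is 300.0 against a total of 10000.0 gets pondere = int(round(300.0 / 10000.0 * 100)) = 3; any share that rounds to 0 is bumped up to 1 so every county keeps a nonzero weight.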

dh = harta.MapDocument("CURRENT")
df = harta.ListDataFrames(dh, "Layers")[0]

l_jud_sel = harta.ListLayers(dh, "judTab", df)[0]
fl_simb = "T:/IE/uvc.lyr"
l_simb = harta.Layer(fl_simb)
harta.UpdateLayer(df, l_jud_sel, l_simb, "TRUE")
if l_jud_sel.symbologyType == "UNIQUE_VALUES":
    l_jud_sel.symbology.valueField = "pondere"
    l_jud_sel.symbology.addAllValues()
    l_jud_sel.symbology.showOtherValues = False

arcpy.RefreshActiveView()
arcpy.RefreshTOC()
Example #24
def process():
    class LicenseError(Exception):
        pass

    try:
        if CheckExtension("3D") == "Available":
            CheckOutExtension("3D")
        else:
            # raise a custom exception
            raise LicenseError

        # Constants - DO NOT MODIFY
        split_area = "split_area"
        orig_area = "orig_area"

        def calc_area(in_fc, field_name):
            AddField(in_fc, field_name, "DOUBLE")
            with da.UpdateCursor(in_fc, [field_name, "SHAPE@AREA"]) as cursor1:
                for r1 in cursor1:
                    r1[0] = r1[1]
                    cursor1.updateRow(r1)

        def field_exists(in_fc, in_field):
            from arcpy import ListFields
            if in_field in [f.name for f in ListFields(in_fc)]:
                return True
            else:
                return False

        def delete_field_if_exists(in_fc, in_field):
            if field_exists(in_fc, in_field):
                DeleteField(in_fc, in_field)

        assert field_exists(in_buildings, building_fid), \
            "no attribute named {} in feature class".format(building_fid)

        for field in [tile_fid, file_name]:
            delete_field_if_exists(in_buildings, field)

        temp_fp = join("in_memory", "mp_fp")
        ddd.MultiPatchFootprint(in_buildings, temp_fp, "bldg_fid")

        calc_area(in_fc=temp_fp, field_name=orig_area)

        temp_isect = join("in_memory", "temp_isect")
        Intersect(r"{0} #;{1} #".format(temp_fp, in_tiles), temp_isect, "ALL",
                  None, "INPUT")

        # Delete Temporary Multipatch Footprint
        Delete(temp_fp)

        calc_area(in_fc=temp_isect, field_name=split_area)

        temp_isect_asc = join("in_memory", "temp_isect_asc")
        Sort(temp_isect, temp_isect_asc, [[building_fid, "ASCENDING"]])
        # Delete Temporary Intersect Feature Class
        Delete(temp_isect)

        fields = [building_fid, tile_fid, file_name, orig_area, split_area]

        # Generate a list of duplicates
        bldg_list = []
        with da.SearchCursor(temp_isect_asc, building_fid) as cursor2:
            for row in cursor2:
                bldg_list.append(row[0])

        duplicates = [
            item for item, count in Counter(bldg_list).items() if count > 1
        ]

        duplicates_list = []
        for i in duplicates:
            duplicates_list.append([i, bldg_list.count(i)])

        # TODO: Resolve why tile_fid is not showing up below when BuildingFID and TileFID are OID fields. "In_memory" issue
        '''
        # \\ Begin Debug print code
        from arcpy import AddMessage
        fds = [f.name for f in arcpy.ListFields(temp_isect_asc) if f.name in fields]
        AddMessage(fds)
        nfds = [f.name for f in arcpy.ListFields(temp_isect_asc) if f.name not in fields]
        AddMessage(nfds)
        # End Debug print code //
        '''
        final_list = []
        with da.SearchCursor(temp_isect_asc, fields) as cursor3:
            prev_area = -1
            prev_item_list = []
            item_count = 0
            fcound = 0
            for row in cursor3:
                if row[0] not in duplicates:
                    final_list.append([row[0], row[1], row[2]])
                else:
                    area = row[3] - row[4]
                    index = duplicates.index(row[0])
                    total_items = duplicates_list[index][1]
                    if row[0] == duplicates[
                            0] and item_count == 0:  # Deal with first item differently
                        item_count += 1
                        prev_area = area
                        prev_item_list = [row[0], row[1], row[2]]
                    elif item_count + 1 == total_items:  # Deal with last item in list
                        if prev_area <= area:
                            prev_area = area
                            prev_item_list = [row[0], row[1], row[2]]
                        final_list.append(prev_item_list)
                        item_count = 0
                        prev_area = -1
                        prev_item_list = []
                    elif item_count + 1 != total_items:
                        if prev_area <= area:
                            prev_area = area
                            prev_item_list = [row[0], row[1], row[2]]
                        item_count += 1
        # Append results back to Input Feature Class
        AddField(in_buildings, tile_fid, "LONG")
        AddField(in_buildings, file_name, "TEXT")
        with da.UpdateCursor(in_buildings,
                             [building_fid, tile_fid, file_name]) as cursor:
            for r in cursor:
                for i in final_list:
                    if r[0] == i[0]:
                        r[1] = int(i[1])
                        r[2] = str(i[2])
                cursor.updateRow(r)

        Delete(temp_isect_asc)  # temp_isect was already deleted above; clean up the sorted copy instead
        del bldg_list
        del duplicates_list
        del duplicates

        # Check back in 3D Analyst license
        CheckInExtension("3D")
    except LicenseError:
        AddError("3D Analyst license is unavailable")
        print("3D Analyst license is unavailable")
    except ExecuteError:
        AddError("3D Analyst license is unavailable")
        print(GetMessages(2))
Example #25
    nonecount = NoneCount(LineFC)[0]
    print(u'{} lines still have no node IDs assigned; running the update...'.format(nonecount))
    while nonecount:
        UpdateLineAttribute()
        if NoneCount(LineFC)[0] < nonecount:
            IterateLines()



statTime = time.time()
print('Starting...')
# Iterate over the boiler house points and do the first pass of line attribute updates
with da.SearchCursor(pointFC, ['SHAPE@', 'NodeID'], 'NodeType = 8') as cursorG:
    for pointGLF in cursorG:
        # Iterate over the supply pipe lines
        with da.UpdateCursor(LineFC, ['SHAPE@', 'LineID', 'S_NodeID', 'E_NodeID'], 'HLType = 1') as cursorL:
            for lineRL in cursorL:
                if lineRL[2] is None and lineRL[3] is None:
                    if lineRL[0].touches(pointGLF[0]):
                        # start node ID
                        lineRL[2] = pointGLF[1]
                        # end node ID
                        with da.SearchCursor(pointFC, ['SHAPE@', 'NodeID'], 'NodeType <> 8') as cursorP:
                            for point in cursorP:
                                if lineRL[0].touches(point[0]):
                                    lineRL[3] = point[1]
                                    cursorL.updateRow(lineRL)
                                    print(u'<{0}>: start node <{1}>, end node <{2}>.'.format(lineRL[1], lineRL[2], lineRL[3]))

# Run the automatic sequential assignment function
IterateLines()
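Example #25 relies on NoneCount and IterateLines helpers defined elsewhere in the script. A minimal stand-in for NoneCount, assuming its first return value is the number of lines that still lack node IDs, could be:

def NoneCount(line_fc):
    # hypothetical stand-in: count lines that still have no start/end node IDs
    with da.SearchCursor(line_fc, ['LineID'],
                         'S_NodeID IS NULL OR E_NodeID IS NULL') as cur:
        unassigned = [row[0] for row in cur]
    return len(unassigned), unassigned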
Example #26
    def output2NewFC(self,
                     outputFC,
                     candidateFields,
                     appendFields=[],
                     fieldOrder=[]):
        """Creates a new feature class with the same shape charcteristics as
        the source input feature class and appends data to it.

        INPUTS:
        outputFC (str): catalogue path to output feature class
        candidateFields (dict): fieldName = instance of CandidateField
        appendFields {list, []}: field names in the order you want appended
        fieldOrder {list, []}: the order with which to write fields
        """

        #### Initial Progressor Bar ####
        ARCPY.overwriteOutput = True
        ARCPY.SetProgressor("default", ARCPY.GetIDMessage(84006))

        #### Validate Output Workspace ####
        ERROR.checkOutputPath(outputFC)

        #### Create Path for Output FC ####
        outPath, outName = OS.path.split(outputFC)

        #### Get Output Name for SDE if Necessary ####
        baseType = UTILS.getBaseWorkspaceType(outPath)
        if baseType.upper() == 'REMOTEDATABASE':
            outName = outName.split(".")[-1]
        self.outputFC = OS.path.join(outPath, outName)

        #### Assess Whether to Honor Original Field Nullable Flag ####
        setNullable = UTILS.setToNullable(self.catPath, self.outputFC)

        #### Add Null Value Flag ####
        outIsShapeFile = UTILS.isShapeFile(self.outputFC)

        #### Create Output Field Names to be Appended From Input ####
        inputFieldNames = ["SHAPE@", self.masterField]
        appendFieldNames = []
        masterIsOID = self.masterField == self.oidName
        if masterIsOID:
            appendFieldNames.append("SOURCE_ID")
        else:
            master = self.allFields[self.masterField.upper()]
            returnName = UTILS.returnOutputFieldName(master)
            appendFieldNames.append(returnName)

        for fieldName in appendFields:
            field = self.allFields[fieldName.upper()]
            returnName = UTILS.returnOutputFieldName(field)
            inputFieldNames.append(fieldName)
            appendFieldNames.append(returnName)
        appendFieldNames = UTILS.createAppendFieldNames(
            appendFieldNames, outPath)
        masterOutName = appendFieldNames[0]

        #### Create Field Mappings for Visible Fields ####
        outputFieldMaps = ARCPY.FieldMappings()

        #### Add Input Fields to Output ####
        for ind, fieldName in enumerate(appendFieldNames):
            if ind == 0:
                #### Master Field ####
                sourceFieldName = self.masterField
                if masterIsOID:
                    fieldType = "LONG"
                    alias = fieldName
                    setOutNullable = False
                    fieldLength = None
                    fieldPrecision = None
                else:
                    masterOutField = self.allFields[self.masterField.upper()]
                    fieldType = masterOutField.type
                    alias = masterOutField.baseName
                    setOutNullable = setNullable
                    fieldLength = masterOutField.length
                    fieldPrecision = masterOutField.precision
            else:
                #### Append Fields ####
                sourceFieldName = appendFields[ind - 1]
                outField = self.allFields[sourceFieldName]
                fieldType = outField.type
                alias = outField.baseName
                setOutNullable = setNullable
                fieldLength = outField.length
                fieldPrecision = outField.precision

            #### Create Candidate Field ####
            outCandidate = CandidateField(fieldName,
                                          fieldType,
                                          None,
                                          alias=alias,
                                          precision=fieldPrecision,
                                          length=fieldLength)

            #### Create Output Field Map ####
            outFieldMap = UTILS.createOutputFieldMap(
                self.inputFC,
                sourceFieldName,
                outFieldCandidate=outCandidate,
                setNullable=setOutNullable)

            #### Add Output Field Map to New Field Mapping ####
            outputFieldMaps.addFieldMap(outFieldMap)

        #### Do FC2FC Without Extent Env Var ####
        FC2FC = UTILS.clearExtent(CONV.FeatureClassToFeatureClass)
        try:
            FC2FC(self.inputFC, outPath, outName, "", outputFieldMaps)
        except:
            ARCPY.AddIDMessage("ERROR", 210, self.outputFC)
            raise SystemExit()

        #### Create/Verify Result Field Order ####
        fieldKeys = sorted(candidateFields.keys())
        if len(fieldOrder) == len(fieldKeys):
            fKeySet = set(fieldKeys)
            fieldOrderSet = set(fieldOrder)
            if fieldOrderSet == fKeySet:
                fieldKeys = fieldOrder

            del fKeySet, fieldOrderSet

        #### Add Empty Output Analysis Fields ####
        outputFieldNames = [masterOutName]
        for fieldInd, fieldName in enumerate(fieldKeys):
            field = candidateFields[fieldName]
            field.copy2FC(outputFC)
            outputFieldNames.append(fieldName)

            #### Replace NaNs for Shapefiles ####
            if outIsShapeFile:
                if field.type != "TEXT":
                    isNaN = NUM.isnan(field.data)
                    if NUM.any(isNaN):
                        field.data[isNaN] = UTILS.shpFileNull[field.type]

        #### Populate Output Feature Class with Values ####
        ARCPY.SetProgressor("step", ARCPY.GetIDMessage(84003), 0, self.numObs,
                            1)
        outRows = DA.UpdateCursor(self.outputFC, outputFieldNames)

        for row in outRows:
            masterID = row[0]
            if masterID in self.master2Order:
                order = self.master2Order[masterID]

                #### Create Output Row from Input ####
                resultValues = [masterID]

                #### Add Result Values ####
                for fieldName in fieldKeys:
                    field = candidateFields[fieldName]
                    fieldValue = field.data.item(order)
                    resultValues.append(fieldValue)

                #### Insert Values into Output ####
                outRows.updateRow(resultValues)

            else:
                #### Bad Record ####
                outRows.deleteRow()

            ARCPY.SetProgressorPosition()

        #### Clean Up ####
        del outRows
Example #27
def ReportResolutionOrdering():
    # Create a list to hold the rows from the cursor.
    holderList = list()
    # Connection to the feature class
    fc = connection1 + "CCL_Report"

    testForField = "RESOLUTION_ORDER"
    fieldSeen = 0

    # Look for the RESOLUTION_ORDER field in the table.
    fieldCheckList = ListFields(fc)
    for fieldCheckElement in fieldCheckList:
        if str.upper(str(fieldCheckElement.name)) == str.upper(testForField):
            fieldSeen += 1

    # If the RESOLUTION_ORDER field does not yet exist, add it.
    if fieldSeen == 0:
        print "Adding the Resolution_Order field to the CCL_Report table."
        AddField_management(connection1 + "CCL_Report", "RESOLUTION_ORDER",
                            "SHORT")
        print "Populating Resolution_Order with new values."
    else:
        print "The Resolution_Order field already exists within the CCL_Report table."
        print "Updating the Resolution_Order values."

    # Start the cursor to retrieve the rows from the feature class.
    #fieldList = list()
    fieldList = [
        'OBJECTID', 'CCL_LRS', 'CCL_BEGIN', 'DESCRIPTION', 'CITY',
        'RESOLUTION_ORDER'
    ]
    # Cursor to read the all the fields and place them in an array.
    cursor = da.SearchCursor(fc, fieldList)  # @UndefinedVariable
    for row in cursor:
        listRow = list(row)
        holderList.append(listRow)

    if cursor:
        del cursor
    else:
        pass
    if row:
        del row
    else:
        pass

    # Create a dictionary to store the rows by City.
    rowListDictionary = {}

    # Loop through the list to build a dictionary with CCL_Routes as keys.
    for heldRow in holderList:
        # Each key will hold a list of lists.
        rowListContainer = list()
        # If the key already exists, assign the previous list of lists
        # to the list container, then append the new list
        # before updating the key in the dictionary.
        if heldRow[1] in rowListDictionary:
            rowListContainer = rowListDictionary[heldRow[1]]
            rowListContainer.append(heldRow)
            rowListDictionary[heldRow[1]] = rowListContainer
        # Otherwise, the key needs to be created
        # with the list container having only one list contained
        # within it for now.
        else:
            rowListContainer.append(heldRow)
            rowListDictionary[heldRow[1]] = rowListContainer

    for cclKey in rowListDictionary:
        outListContainer = rowListDictionary[cclKey]
        # Sort based on CCL_Begin.
        outListContainer.sort(key=lambda sortingRow: sortingRow[2])
        countVariable = 0
        descVariable = ''
        for outListIndex, outList in enumerate(outListContainer):

            # Is this the first list/row in the key's list container?
            # If so, then set the Resolution_Order to 0
            if outListIndex == 0:
                outList[5] = 0
                descVariable = outList[3]
            else:
                currentDesc = outList[3]
                # Check to see if the previous description is the same
                # as the current description.
                if currentDesc == descVariable:
                    # If so, set the Resolution_Order to
                    # the current countVariable
                    # and do not increment it.
                    outList[5] = countVariable
                else:
                    # The current desc is different than
                    # the previous desc, so update
                    # the count variable prior
                    # to assignment.
                    countVariable += 1
                    outList[5] = countVariable
                    descVariable = outList[3]

            outListContainer[outListIndex] = outList

        rowListDictionary[cclKey] = outListContainer

    # Need to add an update cursor that will update
    # the RESOLUTION_ORDER field with
    # values from the rowListDictionary
    # based on the shared OBJECTID field.

    fieldList = list()
    fieldList = ['OBJECTID', 'CCL_LRS', 'RESOLUTION_ORDER']

    cursor = da.UpdateCursor(fc, fieldList)  # @UndefinedVariable
    for row in cursor:
        cclKey = row[1]
        outListContainer = rowListDictionary[cclKey]
        for outList in outListContainer:
            #print "City: " + str(outList[4]) + " ObjectID: " + str(outList[0]) + " Order: " + str(outList[5]) # For Testing
            if row[0] == outList[0]:
                # If the ObjectID for the list in the list container
                # for the matching CCL_LRS equals the OBJECTID
                # field in the cursor row, update the
                # cursor row's RESOLUTION_ORDER field
                # to be the same as that list's
                # resolution order field.
                row[2] = outList[5]
            else:
                pass
        cursor.updateRow(row)

    if cursor:
        del cursor
    else:
        pass
    if row:
        del row
    else:
        pass
arcpy.env.workspace = "t:/IE/bd_ro.mdb"
arcpy.env.overwriteOutput = True

tab_inj = arcpy.GetParameterAsText(0)
tab_ind = arcpy.GetParameterAsText(1)

dh = harta.MapDocument("CURRENT")
df = harta.ListDataFrames(dh, "Layers")[0]

ljt = harta.Layer(tab_inj)
ljt.showLabels = True
harta.AddLayer(df, ljt, "TOP")

llt = harta.Layer(tab_ind)
harta.AddLayer(df, llt, "TOP")

listJud = [rand[0] for rand in da.SearchCursor("judTab", ("sj"))]

for jud in listJud:
    arcpy.SelectLayerByAttribute_management(ljt, "NEW_SELECTION",
                                            "sj='" + jud + "'")
    arcpy.Clip_analysis(llt, ljt, "tabClip")
    arcpy.SelectLayerByAttribute_management(ljt, "CLEAR_SELECTION")
    listSeg = [rd[0] for rd in da.SearchCursor("tabClip", ("SHAPE@LENGTH"))]
    sl = sum(listSeg)
    crs = da.UpdateCursor(ljt, ("Lungime"), "sj='" + jud + "'")
    rand = next(crs)
    rand[0] = sl
    crs.updateRow(rand)
del crs
Example #29
def calc_area(in_fc, field_name):
    AddField(in_fc, field_name, "DOUBLE")
    with da.UpdateCursor(in_fc, [field_name, "SHAPE@AREA"]) as cursor1:
        for r1 in cursor1:
            r1[0] = r1[1]
            cursor1.updateRow(r1)
Example #30
# build a dictionary of geometries keyed by ID with a search cursor
geometries = {
    key: value
    for (key, value) in arcpy.da.SearchCursor(A, [idFieldA, 'SHAPE@'])
}

#create edit session
edit = da.Editor(engSDE)
edit.startEditing(True, True)
edit.startOperation()

print("Update Starting...")

#Update B with geometries from A where IDs match
updatedParcels = []
notFound = []
with da.UpdateCursor(B, [idFieldB, skip_Field, 'SHAPE@']) as cursor:
    for row in cursor:
        if row[1] == '0':
            try:
                if row[2] != geometries[row[0]]:
                    print(row[0])
                    updatedParcels.append(row[0])
                    row[2] = geometries[row[0]]
                    cursor.updateRow(row)
            except KeyError:
                # the ID from B has no matching geometry in A
                notFound.append(row[0])
    print("Unable to update: ", notFound)

print("Update Finished. Reconciling and Posting.")

arcpy.ReconcileVersions_management(engSDE, "ALL_VERSIONS", "dbo.DEFAULT",