def derive_data_from_catchments(catchment_areas,
                                flow_direction_raster,
                                slope_raster,
                                curve_number_raster,
                                area_conv_factor=0.00000009290304,
                                length_conv_factor=1,
                                out_catchment_polygons=None):
    """
    Derive summary data for each inlet's catchment.

    For tools that handle multiple inputs quickly, we execute here (e.g., zonal
    stats). For those we need to run on individual catchments, this parses the
    catchments raster and passes individual catchments, along with other
    required data, to the calc_catchment_flowlength_max function.

    :param catchment_areas: catchment zone raster (path or arcpy Raster)
    :param flow_direction_raster: flow direction raster used for flow length
    :param slope_raster: slope raster, averaged per catchment
    :param curve_number_raster: curve number raster, averaged per catchment
    :param area_conv_factor: multiplier converting polygon areas to square km,
        which is expected by the core business logic. By default, the factor
        converts from square feet.
    :param length_conv_factor: passed through to calc_catchment_flowlength_max
    :param out_catchment_polygons: optionally save and return a catchment
        polygon feature class at this path.

    Output: a list of records containing info about each inlet's catchment:
        [
            {
                "id": <ID value from pour_point_field (spec'd in catchment_delineation func)>,
                "area_up": <area of inlet's catchment in square km>,
                "avg_slope": <average slope of DEM in catchment>,
                "avg_cn": <average curve number in the catchment>,
                "max_fl": <maximum flow length in the catchment>,
                "tc_hr": 0  # placeholder, populated downstream
            },
            {...},
            ...
         ]
    """
    raster_field = "Value"

    # store the results, keyed by a catchment ID (int) that comes from the
    # catchments layer gridcode
    results = {}

    # coerce the catchment input into a Raster object
    if isinstance(catchment_areas, Raster):
        catchment_raster = catchment_areas
    else:
        catchment_raster = Raster(catchment_areas)
    # the table view below requires a raster attribute table; build if missing
    if not catchment_raster.hasRAT:
        BuildRasterAttributeTable_management(catchment_raster, "Overwrite")

    # make a table view of the catchment raster
    catchment_table = 'catchment_table'
    MakeTableView_management(
        catchment_raster, catchment_table)  #, {where_clause}, {workspace}, {field_info})

    # calculate flow length for each zone. Zones must be isolated as individual
    # rasters for this to work. We handle that with calc_catchment_flowlength_max()
    # using the table to get the zone values...
    catchment_count = int(GetCount_management(catchment_table).getOutput(0))
    with SearchCursor(catchment_table, [raster_field]) as catchments:

        # TODO: implement multi-processing for this loop.

        ResetProgressor()
        SetProgressor('step', "Mapping flow length for catchments", 0,
                      catchment_count, 1)

        for each in catchments:
            this_id = each[0]
            # calculate flow length for each "zone" in the raster
            fl_max = calc_catchment_flowlength_max(catchment_areas, this_id,
                                                   flow_direction_raster,
                                                   length_conv_factor)
            results.setdefault(this_id, {})["max_fl"] = clean(fl_max)
            SetProgressorPosition()
        ResetProgressor()

    def _merge_zonal_mean(value_raster, result_key, table_prefix, label):
        # Run zonal statistics (MEAN) of value_raster over the catchment
        # zones and merge the per-zone means into results under result_key.
        zs_table = so(table_prefix, "timestamp", "fgdb")
        msg("{0} Table: {1}".format(label, zs_table))
        ZonalStatisticsAsTable(catchment_areas, raster_field, value_raster,
                               zs_table, "DATA", "MEAN")
        with SearchCursor(zs_table, [raster_field, "MEAN"]) as zs_cursor:
            for zs_row in zs_cursor:
                results.setdefault(zs_row[0], {})[result_key] = clean(zs_row[1])

    # calculate average curve number and average slope within each catchment
    _merge_zonal_mean(curve_number_raster, "avg_cn", "cn_zs_table", "CN")
    _merge_zonal_mean(slope_raster, "avg_slope", "slopes_zs_table", "Slopes")

    # calculate area of each catchment
    #ZonalGeometryAsTable(catchment_areas,"Value","output_table") # crashes like an mfer
    cp = so("catchmentpolygons", "timestamp", "in_memory")
    #RasterToPolygon copies our ids from raster_field into "gridcode"
    RasterToPolygon_conversion(catchment_areas, cp, "NO_SIMPLIFY",
                               raster_field)

    # Dissolve the converted polygons, since some of the raster zones may have corner-corner links
    if out_catchment_polygons:
        cpd = out_catchment_polygons
    else:
        cpd = so("catchmentpolygonsdissolved", "timestamp", "in_memory")
    Dissolve_management(in_features=cp,
                        out_feature_class=cpd,
                        dissolve_field="gridcode",
                        multi_part="MULTI_PART")

    # get the area for each record, and push into results object
    with SearchCursor(cpd, ["gridcode", "SHAPE@AREA"]) as area_cursor:
        for area_row in area_cursor:
            this_area = area_row[1] * area_conv_factor
            results.setdefault(area_row[0], {})["area_up"] = clean(this_area)

    # flip results object into a records-style array of dictionaries
    # (this makes conversion to table later on simpler); any statistic that
    # was not computed for a catchment defaults to 0
    records = []
    for this_id, result in results.items():
        record = {
            "area_up": 0,
            "avg_slope": 0,
            "max_fl": 0,
            "avg_cn": 0,
            "tc_hr": 0
        }
        for key in record:
            if key in result:
                record[key] = result[key]
        record["id"] = this_id
        records.append(record)

    if out_catchment_polygons:
        return records, cpd
    return records, None
def get_USGS_metadata(usgs_fc):
    """
    Access the USGS site information REST API to get the basin area
    for all applicable sites. Adds the basin area field to the FC (if
    missing) and writes the data returned from the REST service.

    Required: usgs_fc -- the feature class of records from the AWDB

    Returns:  None
    """

    import urllib
    import gzip
    from re import search
    from arcpy import ListFields, AddField_management
    from arcpy.da import SearchCursor, UpdateCursor
    import io

    # check for each required field and add any that are missing
    fields = ListFields(usgs_fc)

    for fieldname, datatype in NEW_FIELDS:
        for field in fields:
            if field.name == fieldname:
                break
        else:
            # no existing field matched this name
            AddField_management(usgs_fc, fieldname, datatype)

    # get a list of station IDs in the FC
    stationIDs = []
    with SearchCursor(usgs_fc, STATION_ID_FIELD) as cursor:
        for row in cursor:
            sid = row[0].split(":")[0]
            # valid USGS station IDs are between 8 and 15 char and are numerical
            if len(sid) >= 8 and not search('[a-zA-Z]', sid):
                stationIDs.append(sid)

    # setup and send the HTTP request (POST body, since the site list can be long)
    request = urllib.request.Request(
        settings.USGS_URL,
        urllib.parse.urlencode({
            "format": "rdb",  # get the data in USGS rdb format
            "sites": ",".join(stationIDs),  # the site IDs to get, separated by commas
            "siteOutput": "expanded"  # expanded output includes basin area
            #"modifiedSince": "P" + str(SCRIPT_FREQUENCY) + "D"  # only get records modified since last run
        }).encode('utf-8')
    )

    # allow gzipped response
    request.add_header('Accept-encoding', 'gzip')
    response = urllib.request.urlopen(request)

    # check to see if response is gzipped and decompress if yes
    if response.info().get('Content-Encoding') == 'gzip':
        buf = io.BytesIO(response.read())
        data = gzip.GzipFile(fileobj=buf)
    else:
        data = response

    # parse the response and create a dictionary keyed on the station ID
    stationAreas = {}
    for line in data.readlines():
        line = line.decode('utf-8')
        if line.startswith('USGS'):
            # data elements in line (station record) are separated by tabs
            line = line.split('\t')
            # the 2nd element is the station ID, 3rd is the name,
            # and the 30th is the area
            # order in the tuple is important,
            # so data is entered into the correct fields in the table
            stationAreas[line[1]] = (line[29], line[1], line[2])

    # write the response data to the FC
    fieldsToAccess = [STATION_ID_FIELD]+[name for name, datatype in NEW_FIELDS]
    with UpdateCursor(usgs_fc, fieldsToAccess) as cursor:
        for row in cursor:
            stationid = row[0].split(":")[0]

            try:
                # row[1] is area; float() raises ValueError when the
                # service returned "" for the area
                row[1] = float(stationAreas[stationid][0])
            except KeyError:
                # in case no record was returned for ID
                # skip to next record
                continue
            except ValueError:
                # in case area returned is ""; leave the field as-is
                pass

            # row[2] is the USGS station ID, row[3] is the USGS station name.
            # (Plain assignments cannot raise ValueError, so the try/except
            # wrappers the original had around these two lines were dead code.)
            row[2] = stationAreas[stationid][1]
            row[3] = stationAreas[stationid][2]

            # no exception so data valid, update row
            cursor.updateRow(row)
Example #3
0
"""
Print out only "friendly" cats
"""
import os

from arcpy.da import SearchCursor

in_table = os.path.join(os.getcwd(),
                        r"Intro_GP_Script_Tools_2018.gdb\Cat_Data")
field_names = ['OID@', 'Type']
sql = "friendly = 1"

output = '\nFriendly Cats:\n'

with SearchCursor(in_table, field_names, sql) as sc:
    for row in sc:
        output += 'OID: {0} -- {1} cat\n'.format(*row)

print(output)
arcpy.AddMessage(output)
def build_cn_raster(landcover_raster,
                    lookup_csv,
                    soils_polygon,
                    soils_hydrogroup_field="SOIL_HYDRO",
                    reference_raster=None,
                    out_cn_raster=None):
    """Build a curve number raster from landcover raster, soils polygon, and a crosswalk between 
    landcover classes, soil hydro groups, and curve numbers.

    :param landcover_raster: landcover raster (path or arcpy Raster); its
        cell values must match the 'utc' column of the lookup CSV
    :param lookup_csv: CSV crosswalk with at least 'utc' (landcover class,
        int), 'soil' (hydro group text), and 'cn' (curve number, int) columns
    :param soils_polygon: polygon containing soils with a hydro classification.
    :param soils_hydrogroup_field: field holding the hydro group,
        defaults to "SOIL_HYDRO" (from the NCRS soils dataset)
    :param reference_raster: optional raster (path or Raster) used to set
        snap/cell size/extent/CRS for all raster ops; defaults to the
        landcover raster
    :param out_cn_raster: optional output path; an in_memory scratch raster
        is used when omitted
    :return: path to the curve number raster
    """

    # GP Environment ----------------------------
    msg("Setting up GP Environment...")
    # if reference_raster is provided, we use it to set the GP environment for
    # subsequent raster operations
    if reference_raster:
        if not isinstance(reference_raster, Raster):
            # read in the reference raster as a Raster object.
            reference_raster = Raster(reference_raster)
    else:
        reference_raster = Raster(landcover_raster)

    # set the snap raster, cell size, and extent, and coordinate system for subsequent operations
    env.snapRaster = reference_raster
    env.cellSize = reference_raster.meanCellWidth
    env.extent = reference_raster
    env.outputCoordinateSystem = reference_raster

    cs = env.outputCoordinateSystem.exportToString()

    # SOILS -------------------------------------

    msg("Processing Soils...")
    # read the soils polygon into a raster, get list(set()) of all cell values from the landcover raster
    soils_raster_path = so("soils_raster")
    PolygonToRaster_conversion(soils_polygon, soils_hydrogroup_field,
                               soils_raster_path, "CELL_CENTER")
    soils_raster = Raster(soils_raster_path)

    # use the raster attribute table to build a lookup of raster values to soil hydro codes
    # from the polygon (that were stored in the raster attribute table after conversion)
    if not soils_raster.hasRAT:
        msg("Soils raster does not have an attribute table. Building...",
            "warning")
        BuildRasterAttributeTable_management(soils_raster, "Overwrite")
    # build a 2D array from the RAT
    fields = ["Value", soils_hydrogroup_field]
    rows = [fields]
    with SearchCursor(soils_raster_path, fields) as sc:
        for row in sc:
            rows.append([row[0], row[1]])
    # turn that into a dictionary, where the key==soil hydro text and value==the raster cell value
    lookup_from_soils = {v: k for k, v in etl.records(rows)}
    # also capture a list of just the values, used to iterate conditionals later
    soil_values = [v['Value'] for v in etl.records(rows)]

    # LANDCOVER ---------------------------------
    msg("Processing Landcover...")
    # BUGFIX: the original only assigned landcover_raster_obj when the input
    # was NOT already a Raster, so passing a Raster raised a NameError below.
    if isinstance(landcover_raster, Raster):
        landcover_raster_obj = landcover_raster
    else:
        # read in the landcover raster as a Raster object.
        landcover_raster_obj = Raster(landcover_raster)
    landcover_values = []
    with SearchCursor(landcover_raster, ["Value"]) as sc:
        for row in sc:
            landcover_values.append(row[0])

    # LOOKUP TABLE ------------------------------
    msg("Processing Lookup Table...")
    # read the lookup csv, clean it up, and use the lookups from above to limit it to just
    # those values in the rasters
    t = etl\
        .fromcsv(lookup_csv)\
        .convert('utc', int)\
        .convert('cn', int)\
        .select('soil', lambda v: v in lookup_from_soils.keys())\
        .convert('soil', lookup_from_soils)\
        .select('utc', lambda v: v in landcover_values)

    # This gets us a table where we the landcover class (as a number) corresponding to the
    # correct value in the converted soil raster, with the corresponding curve number.

    # DETERMINE CURVE NUMBERS -------------------
    msg("Assigning Curve Numbers...")
    # Use that to reassign cell values using conditional map algebra operations
    cn_rasters = []
    for rec in etl.records(t):
        cn_raster_component = Con(
            (landcover_raster_obj == rec.utc) & (soils_raster == rec.soil),
            rec.cn, 0)
        cn_rasters.append(cn_raster_component)

    # overlay the per-class rasters; MAXIMUM keeps the assigned CN over the 0s
    cn_raster = CellStatistics(cn_rasters, "MAXIMUM")

    # REPROJECT THE RESULTS -------------------
    msg("Reprojecting and saving the results....")
    if not out_cn_raster:
        out_cn_raster = so("cn_raster", "random", "in_memory")

    ProjectRaster_management(in_raster=cn_raster,
                             out_raster=out_cn_raster,
                             out_coor_system=cs,
                             resampling_type="NEAREST",
                             cell_size=env.cellSize)

    return out_cn_raster
Example #5
0
def accidentdataextractor(countyNo):
    extractDataOutGDB = r'Database Connections\[email protected]'
    extractDataOutName = r'GEO.ACC_' + countyNo
    extractDataOutPathFull = os.path.join(extractDataOutGDB,
                                          extractDataOutName)

    tableSelectSQL = """ "COUNTY_NBR" = '""" + countyNo + """' """
    MakeTableView_management(extractDataInPathFull, tableViewName,
                             tableSelectSQL)
    accidentDataFull = list()
    accSCursor = SearchCursor(tableViewName, accidentTableFieldNameList)

    for accItem in accSCursor:
        #print "Adding the data with acc_key of: "  + str(accItem[accKeyPosition]) + " to the accidentDataFull list."
        accidentDataFull.append(accItem)

    try:
        del accSCursor
    except:
        pass

    try:
        Delete_management(tableViewName)
    except:
        pass

    if Exists(extractDataOutPathFull):
        print extractDataOutPathFull + " already exists."
        existingKeysList = list()

        outSCursor = SearchCursor(extractDataOutPathFull, "ACCIDENT_KEY")
        for outSItem in outSCursor:
            #print "This key already exists in the output table: "  + str(outSItem[0])
            existingKeysList.append(outSItem[0])

        accDataKeysFull = [
            accDataRow[accKeyPosition] for accDataRow in accidentDataFull
        ]

        # Use a set because finding membership in a set is O(1)
        # whereas finding membership in a list is O(n).
        # That's a problem when trying to find membership in
        # Johnson county, as it currently takes 65,000 * 65,000 * t
        # instead of just 65,000 * t.
        # This causes the script to lag for a long time on
        # county no 046.
        # When using a set instead, it takes about 3 seconds.
        existingKeysSet = set(existingKeysList)

        accDataKeysToInsert = [
            accDataKey for accDataKey in accDataKeysFull
            if accDataKey not in existingKeysSet
        ]

        extractDataOutCursor = InsertCursor(extractDataOutPathFull,
                                            accidentTableFieldNameList)
        for accDataItem in accidentDataFull:
            if str(accDataItem[accKeyPosition]) in accDataKeysToInsert:
                insertedItem = extractDataOutCursor.insertRow(accDataItem)
                print "Inserted a row with OBJECTID " + str(insertedItem) + "."

            else:
                pass

        #searchcursor here to get all the existing accident keys from the current output table.
        # comparison to see which rows from the tableView don't already
        # exist in the current output table
        # insertcursor here to place only missing accident key rows
        # from the searchCursor into the extractDataOutPathFull.

    else:
        CreateTable_management(extractDataOutGDB, extractDataOutName,
                               extractDataInPathFull)
        extractDataOutCursor = InsertCursor(extractDataOutPathFull,
                                            accidentTableFieldNameList)
        for accDataItem in accidentDataFull:
            extractDataOutCursor.insertRow(accDataItem)
Example #6
0
    def __init__(self, lyr_path, dico_lyr, tipo, txt=''):
        u""" Uses OGR functions to extract basic informations about
        geographic vector file (handles shapefile or MapInfo tables)
        and store into dictionaries.

        lyr_path = path to the LYR file
        dico_lyr = dictionary for global informations
        tipo = format
        txt = dictionary of text in the selected language

        see: http://resources.arcgis.com/fr/help/main/10.2/index.html#//00s300000008000000
        """
        # changing working directory to layer folder
        chdir(path.dirname(lyr_path))

        # raising arcpy specific exceptions
        self.alert = 0

        # opening LYR
        try:
            layer_obj = Layer(lyr_path)
        except:
            # BUGFIX: use a %s placeholder -- the original passed lyr_path as
            # a lazy formatting argument to a message with no placeholder, so
            # the path was never rendered in the log
            logging.error("Unable to open this file: %s", lyr_path)
            return None

        # ------------ Basics ----------------
        dico_lyr[u'name'] = layer_obj.name
        dico_lyr[u'description'] = layer_obj.description
        dico_lyr[u'folder'] = path.dirname(lyr_path)
        # by default let's start considering there is only one layer
        dico_lyr[u'layers_count'] = 1
        dico_lyr['broken'] = layer_obj.isBroken

        # ------------ LYR type ----------------
        if layer_obj.isFeatureLayer:
            dico_lyr[u'type'] = txt.get('lyr_featL')
            self.infos_geos(layer_obj, dico_lyr)
            self.infos_basics(layer_obj, dico_lyr)
            # fields
            dico_fields = OrderedDict()
            if layer_obj.isBroken:
                self.erratum(dico_lyr, lyr_path, u'err_corrupt')
                self.alert = self.alert + 1
                return None

            try:
                self.infos_fields(layer_obj, dico_lyr, dico_fields)
                dico_lyr[u'fields'] = dico_fields
            except RuntimeError:
                self.erratum(dico_lyr, lyr_path, u'err_corrupt')
                self.alert = self.alert + 1
                return None

            # count features: distinct values of the first field
            # (NOTE(review): dico_fields.keys()[0] is Python 2 only)
            with SearchCursor(lyr_path, [dico_fields.keys()[0]]) as cursor:
                rows = {row[0] for row in cursor}

            # rows is a set, so len() gives the distinct count directly
            # (the original counted it with a manual loop)
            dico_lyr[u'num_obj'] = len(rows)

        elif layer_obj.isRasterLayer:
            dico_lyr[u'type'] = txt.get('lyr_rastL')
            self.infos_geos(layer_obj, dico_lyr)
            self.infos_basics(layer_obj, dico_lyr)
        elif layer_obj.isRasterizingLayer:
            dico_lyr[u'type'] = txt.get('lyr_rastzL')
            self.infos_basics(layer_obj, dico_lyr)
        elif layer_obj.isServiceLayer:
            dico_lyr[u'type'] = txt.get('lyr_servL')
            self.infos_basics(layer_obj, dico_lyr)
            if layer_obj.supports("SERVICEPROPERTIES"):
                self.infos_service(layer_obj.serviceProperties, dico_lyr)
            else:
                self.erratum(dico_lyr, lyr_path, u'err_incomp')
                self.alert = self.alert + 1
                return None
        elif layer_obj.isNetworkAnalystLayer:
            dico_lyr['type'] = txt.get('lyr_netwaL')
            self.infos_basics(layer_obj, dico_lyr)
        elif layer_obj.isGroupLayer:
            dico_lyr['type'] = txt.get('lyr_groupL')
            self.infos_basics(layer_obj, dico_lyr)
            # layers inside (first entry is the group layer itself)
            sublayers = ListLayers(layer_obj)
            dico_lyr['layers_count'] = len(sublayers) - 1
            dico_lyr['layers_names'] = [
                sublyr.name for sublyr in sublayers[1:]
            ]
            dico_lyr['layers_sources'] = [
                sublyr.dataSource for sublyr in sublayers[1:]
                if sublyr.supports("DATASOURCE")
            ]
        else:
            self.erratum(dico_lyr, lyr_path, u'err_incomp')
            self.alert = self.alert + 1
            return None

        # scale
        dico_lyr['maxScale'] = layer_obj.maxScale
        dico_lyr['minScale'] = layer_obj.minScale

        # secondary
        dico_lyr['license'] = layer_obj.credits
        dico_lyr['broken'] = layer_obj.isBroken

        # dependencies: sibling files sharing the LYR's base name (except the
        # .lyr itself) or having a matching .xml sidecar
        dependencies = [
            f for f in listdir(path.dirname(lyr_path))
            if path.splitext(path.abspath(f))[0] == path.splitext(lyr_path)[0]
            and not path.splitext(path.abspath(f).lower())[1] == ".lyr"
            or path.isfile('%s.xml' % f[:-4])
        ]
        dico_lyr[u'dependencies'] = dependencies

        # cumulated size: include the LYR file itself, then restore the list
        dependencies.append(lyr_path)
        total_size = sum([path.getsize(f) for f in dependencies])
        dico_lyr[u"total_size"] = self.sizeof(total_size)
        dependencies.pop(-1)

        # global dates
        dico_lyr[u'date_actu'] = strftime('%d/%m/%Y',
                                          localtime(path.getmtime(lyr_path)))
        dico_lyr[u'date_crea'] = strftime('%d/%m/%Y',
                                          localtime(path.getctime(lyr_path)))
Example #7
0
# the feature class that I used which was in the outgoing\TSS folder's
# Data_Transmit.gdb.
# Do a quick check to see if any of the ramps are multipart/have
# duplicate LRS_KEY values -- then find out if that's
# supposed to be the case.

from arcpy.da import SearchCursor

print 'Check starting...'

rampFeatures = r'C:\GIS\Geodatabases\KHUB\Data_Mirroring_02_Prime.gdb\Export_Ramps'
rampFields = ['LRS_KEY', 'OBJECTID']

lrsKeyList = list()

newCursor = SearchCursor(rampFeatures, rampFields)

# Read all of the LRS_Keys in a feature class.
for rampItem in newCursor:
    rampLRSKey = str(rampItem[0])
    # If there is a duplicate LRS_Key, complain to the user.
    if rampLRSKey in lrsKeyList:
        print 'The LRS_KEY: ' + rampLRSKey + ' is a duplicate!'
    else:
        pass
    lrsKeyList.append(rampLRSKey)

try:
    del newCursor
except:
    pass
def CreateUniqueRoadNameTable(optionsObject):
    """ From: Kansas_Unique_Road_Name_List_By_County.py
    This is an search cursor that creates an in_memory table listing all
    of the unique road names in a county.
    
    The primary purpose of the table that this function returns
    is to be used by the preAccidentGeocodingUpdate function
    to inflate the road names from 2010 and prior accident
    data that has been truncated to 6 characters or less.
    
    Note: The 6 character maximum was removed partway
    through 2010, so a significant portion of the 2010 data
    does not have that limitation.
    """

    inputCenterlines = optionsObject.roadsFeaturesLocation

    try:
        # Set the variables
        # These should be provided by the optionsObject and
        # modified by input parameters
        # (if available how the script was called).

        GDBPath = "in_memory"
        env.workspace = "in_memory"
        tableName = "UniqueRoadNames"
        sortedTableName = "UniqueRoadNames_Sorted"
        fieldName2 = "RD"
        fieldLength2 = 99
        roadList = list()
        newRoadList = list()

        cursorFields = ["RD"]

        ### If the table already exists, delete it.
        if Exists(tableName):
            Delete_management(tableName)
        else:
            pass

        ### Write a new table to the geodatabase.
        CreateTable_management(GDBPath, tableName)

        ### Add a field to the new table.
        AddField_management(tableName, fieldName2, "TEXT", "", "",
                            fieldLength2)

        # Create a new search cursor to get road names.
        # Repoint this to the passed in parameter for the roads centerline.
        sCursor = SearchCursor(inputCenterlines, cursorFields)

        for rowItem in sCursor:
            roadListPart = [rowItem[0]]

            # Append the current road name to the road name list.
            roadList.append(roadListPart)

        # Delete cursor to remove locks on the data
        try:
            del sCursor
        except:
            pass

        # Print the first and last item in the roadList.
        AddMessage("roadList first item:")
        AddMessage(str(roadList[0]))
        AddMessage("roadList last item:")
        AddMessage(str(roadList[-1]))

        print ""

        # Assign only the unique values in roadList to a new list.
        for i in roadList:
            if (i not in newRoadList):
                newRoadList.append(i)

        # Write the newRoadList to the new table using an insert cursor.
        iCursor = InsertCursor(tableName, cursorFields)

        # Rewrite this so that it properly uses the information that it iterates over.
        for roadPlaceHolder in newRoadList:
            ##AddMessage(str(roadPlaceHolder[0]))
            iCursor.insertRow(roadPlaceHolder)

        # Delete cursor to remove locks on the data
        try:
            del iCursor
        except:
            pass

        Sort_management(tableName, sortedTableName, [["RD", "ASCENDING"]])

    except Exception as Exception1:
        print str(Exception1[0])
        try:
            del Exception1
        except:
            pass

    # Even if the program has an exception, it should still delete the cursor objects.
    finally:
        try:
            del sCursor
        except:
            pass
        try:
            del iCursor
        except:
            pass
"""

import os

from arcpy import AddField_management, AddMessage, CreateFeatureclass_management, GetParameterAsText
from arcpy.da import InsertCursor, SearchCursor

in_table = GetParameterAsText(0)  # Feature Layer
friendly_value = GetParameterAsText(1)  # Integer
output = GetParameterAsText(2)  # Feature Layer

# Get data from input-table
where_clause = '{0} = 1'.format(friendly_value)
field_names = ['SHAPE@XY', 'Type']
rows = list()
with SearchCursor(in_table, field_names, where_clause) as sc:
    for row in sc:
        rows.append(row)

# Create new feature class
path = '\\'.join(in_table.split('\\')[:-1])
output_name = output.split('\\')[-1]
CreateFeatureclass_management(path, output_name, 'POINT')

# Add fields to the new feature class
fields = [
    ('Type', 'TEXT'),
    ('Friendly', 'SHORT')
]
for f in fields:
    AddField_management(output, f[0], f[1])

# Write the collected rows into the new feature class.
# (The original collected `rows` and imported InsertCursor but never
# inserted anything, leaving the output empty.)
with InsertCursor(output, field_names) as ic:
    for row in rows:
        ic.insertRow(row)
Example #10
0
def checkRequiredFieldValues(gdb, folder, esb):
    """Walk every table/feature class in the gdb, find required fields whose
    values are null, report each offending ObjectID via userMessage, and
    record the collected results with RecordResults.
    """
    userMessage("Checking that required fields have all values...")
    from os import path
    from arcpy.da import Walk, SearchCursor
    from arcpy import MakeTableView_management, Delete_management, GetCount_management, ListFields
    from time import strftime

    #get today's date
    today = strftime("%m/%d/%y")

    #get required fields
    rfDict = getRequiredFields(folder)

    id = "OBJECTID"
    values = []

    #walk through the tables/feature classes
    for dirpath, dirnames, filenames in Walk(gdb, True, '', False,
                                             ["Table", "FeatureClass"]):
        for filename in filenames:
            fullPath = path.join(gdb, filename)

            #get the keyword to acquire required field names
            keyword = getKeyword(filename, esb)

            # BUGFIX: the original left requiredFieldList unbound (or stale
            # from the previous iteration) when the keyword was missing;
            # skip such files instead
            if keyword not in rfDict:
                continue
            requiredFieldList = rfDict[keyword]

            rfl = [rf.upper() for rf in requiredFieldList]

            #make list of upper-cased field names in the feature class
            fields = [aF.name.upper() for aF in ListFields(fullPath)]

            #get the set of required fields that are actually present
            matchingFields = list(set(rfl) & set(fields))

            #create where clause to select any records where required values aren't populated
            wc = ""
            for field in matchingFields:
                wc = wc + " " + field + " is null or "
            # trim the trailing " or "
            wc = wc[0:-4]

            #make table view using where clause
            lyr = "lyr"
            MakeTableView_management(fullPath, lyr, wc)

            #get count of the results
            result = GetCount_management(lyr)
            count = int(result.getOutput(0))

            #if count is greater than 0, it means a required value somewhere isn't filled in
            if count > 0:
                #make sure the objectID gets included in the search for reporting
                if id not in matchingFields:
                    matchingFields.append(id)

                #run a search cursor to get any/all records where a required field value is null
                with SearchCursor(fullPath, (matchingFields), wc) as rows:
                    for row in rows:

                        #get object ID of the record
                        oid = str(row[matchingFields.index(id)])

                        # BUGFIX: the original's "while k < 0" loop never
                        # executed, so null values were counted but never
                        # reported; scan every selected field in this row
                        for k, value in enumerate(row):
                            #see if the value is nothing
                            if value is None:
                                #report the value if it is indeed null
                                report = matchingFields[
                                    k] + " is null for ObjectID " + oid
                                userMessage(report)
                                val = (today, report, filename,
                                       matchingFields[k], oid)
                                values.append(val)
            else:
                userMessage("All required values present for " + filename)

            Delete_management(lyr)

    # BUGFIX: the original compared the list to "" (always unequal, so the
    # call always ran); only record when something was actually found
    if values:
        RecordResults("fieldValues", values, gdb)

    userMessage("Completed check for required field values")
Example #11
0
def UpdateKdotNameInCenterline(centerlineToIntersect, centerlineAliasTable):
    """Populate or refresh the KDOT_ROUTENAME field in a centerline feature class.

    Reads SEGID / KDOT_ROUTENAME pairs from the alias table, picks the
    dominant route name per SEGID via compareRouteNames(), then writes that
    name back to the matching centerline rows. The write happens inside an
    edit session because the feature class participates in a topology.

    centerlineToIntersect -- path to the centerline feature class to update.
    centerlineAliasTable -- path to the table holding SEGID/KDOT_ROUTENAME pairs.
    """
    # Make sure the target field exists before attempting any updates.
    fieldNamesList = [field.name for field in ListFields(centerlineToIntersect)]

    if "KDOT_ROUTENAME" not in fieldNamesList:
        # AddField must run with the workspace set to the FC's geodatabase.
        addFieldWorkspace = getGDBLocationFromFC(centerlineToIntersect)
        env.workspace = addFieldWorkspace

        fieldNameToAdd = "KDOT_ROUTENAME"
        fieldLength = 10

        AddField_management(centerlineToIntersect, fieldNameToAdd, "TEXT", "", "", fieldLength)

        AddMessage("The " + str(fieldNameToAdd) + " field was added to " + str(centerlineToIntersect) + ".")
    else:
        AddMessage("The KDOT_ROUTENAME field already exists within " + centerlineToIntersect + ".")
        AddMessage("It will not be added again, but its values will be updated (where necessary).")

    aliasFields = ['SEGID', 'KDOT_ROUTENAME']

    # Collect only the alias rows that actually carry a route name.
    aliasCursor = SearchCursor(centerlineAliasTable, aliasFields)
    aliasList = [aliasRow for aliasRow in aliasCursor if aliasRow[1] is not None]
    try:
        del aliasCursor
    except NameError:
        pass

    # Group the alias rows by SEGID.
    aliasDictionary = dict()
    for aliasListItem in aliasList:
        aliasDictionary.setdefault(aliasListItem[0], []).append(aliasListItem)

    # For each SEGID, keep only the dominant route name.
    aliasListForUpdate = list()
    for aliasDictKey in aliasDictionary:
        bestRouteName = ''
        for listContainerItem in aliasDictionary[aliasDictKey]:
            # Logic to decide route to use based on route dominance is in
            # the compareRouteNames function.
            bestRouteName = compareRouteNames(bestRouteName, listContainerItem[1])
        aliasListForUpdate.append((aliasDictKey, bestRouteName))

    # Have to start an edit session because the feature class participates
    # in a topology.
    try:
        editWorkspace = getGDBLocationFromFC(centerlineToIntersect)

        editSession = Editor(editWorkspace)
        editSession.startEditing(False, False)
        editSession.startOperation()

        routeToUpdateCursor = UpdateCursor(centerlineToIntersect, aliasFields)
        for routeToUpdate in routeToUpdateCursor:
            routeToUpdate = list(routeToUpdate)
            for aliasForUpdate in aliasListForUpdate:
                if routeToUpdate[0] == aliasForUpdate[0]:
                    routeToUpdate[1] = aliasForUpdate[1]
                    # SEGIDs are unique in aliasListForUpdate; stop at the
                    # first hit instead of scanning the rest of the list.
                    break
            routeToUpdateCursor.updateRow(routeToUpdate)

        del routeToUpdateCursor

        editSession.stopOperation()
        editSession.stopEditing(True)

    except ExecuteError:
        AddMessage((GetMessages(2)))
Example #12
0
def OffsetDirectionMatrix2(offsetOptions):
    """Update the accidentDataWithOffsetOutput geometry with data from geocodedFeatures.
    
    Keyword arguments to be included in the options class:
    gdbLocation -- The gdb where the outputWithOffsetLocations feature class resides.
    accidentDataAtIntersections -- A point feature class containing geocoded accident information.
    accidentDataWithOffsetOutput -- A point feature class with the same structure as the 
        geocodedFeatuers AND an "isOffset" row of type "TEXT" with length of at least 5.
    whereClauseInUse -- Whether or not the script will use a where clause. Boolean value.
    roadsFeaturesLocation -- The path to the local roads centerline feature class.
    aliasTable -- The path to the roads alias table for the roads centerline feature class.
    maxDegreesDifference -- The number of degrees that a potentially matching accident
        offset location can be from the direction specified. If this is set to -1, the check
        will be skipped and no matching accident offset locations will be rejected, even if
        they are in the opposite direction from where the accident record says they should
        be. I.e. the accident could be offset to the North when the accident record says that
        it should be South of the intersection when this check is skipped.
    XYFieldList -- The list of fields to use from the copy of the geocoded accidents feature
        class after that copy has had POINT_X and POINT_Y fields added and calculated.
    """
    
    ###########################################################################
    ## Function overview:
    ## For each row in the feature class of accidents that have been geolocated
    ## to an intersection:
    ###########################################################################
    # Make sure that the Status for the point is not 'U' -- Unlocated.
    # Might take care of test for 'U' points before getting to this
    # step in the process, but if not, be sure to test for it here.
    # Create/calculate intersection X & Y field named POINT_X and POINT_Y.
    # Then, calculate those fields.
    # Then, create a buffer.
    # Then, select the On_Road in the roads layer.
    # Then, intersect the buffer with the roads layer to create an offset
    # points layer.
    # Then, split the offset points from potential multipart points to
    # singlepart points.
    ###########################################################################
    # Then, use the "SHAPE@XY" token to access the X & Y of the individual
    # offset points and compare them to the X & Y values in the POINT_X and
    # POINT_Y fields, which hold the values for the related roads' intersection
    # that the accidents were geolocated to.
    # Then, test the intersected points to find the best one for the given
    # direction.
    ###########################################################################
    # Then, append the information for that point into a list.
    # Then, delete the buffer and intersection layer.
    # Repeat for each other row...
    ###########################################################################
    # When all the rows are finished,
    # Append the attribute information for the
    # related accident into each offset point's row.
    # Lastly, write the data for all the offset point rows
    # into the output layer.
    ###########################################################################
    # Maximum angle difference code confirmed to be working. -- 2015-03-18
    # 771/771 manually checked look good (for the information given) using
    # UpdateKdotNameInCenterline(), Where Clause for selection, and
    # Maximum Angle Difference.
    # Locates 771/862 non-'U' points without the modified versions of 
    # ON_ROAD_NAME/AT_ROAD/AT_ROAD_DIRECTION/AT_ROAD_DIST_FEET labeled fields
    # and 803/862 with them. 
    ###########################################################################
    
    AddMessage("The value of the useKDOTFields option is: " + str(offsetOptions.useKDOTFields))
    
    roadsToIntersect = offsetOptions.roadsFeaturesLocation
    roadsAliasTable = offsetOptions.aliasTable
    geocodedFeatures = offsetOptions.accidentDataAtIntersections
    outputWithOffsetLocations = offsetOptions.accidentDataWithOffsetOutput
    whereClauseFlag = offsetOptions.whereClauseInUse
    maximumDegreesDifference = offsetOptions.maxDegreesDifference
    KDOTFieldUse = offsetOptions.useKDOTFields
    
    AddMessage("The value for KDOTFieldUse is: " + str(KDOTFieldUse))
    
    if str(KDOTFieldUse).lower() == 'false':
        featuresWithXYFieldList = offsetOptions.NonKDOTXYFieldList
        AddMessage("Using nonKDOTXYFieldList.")
    else:
        featuresWithXYFieldList = offsetOptions.KDOTXYFieldList
    
    geodatabaseLocation = getGDBLocationFromFC(outputWithOffsetLocations)
    
    env.workspace = geodatabaseLocation
    env.overwriteOutput = True
    geocodedWhereClause = "STATUS <> 'U'"
    featuresWithXY = 'geocodedWithXY'
    geocodedLocXY = r'in_memory\geocodedFeatures_Loc_XY' # Changed this to an in_memory location also.
    
    # Scratch data locations
    intermediateAccidentBuffer = r'in_memory\intermediateAccidentBuffer'
    intermediateAccidentIntersect = r'in_memory\intermediateAccidentIntersect'
    intermediateAccidentIntersectSinglePart = r'in_memory\intermediateAccidentIntersectSinglePart'
    # Added 2016-09-06 after the Wichita Area points started processing. Slowly.
    intermediateRoadsToIntersect = r'in_memory\roadsToIntersect'
    intermediateRoadsAliasTable = r'in_memory\roadsAliasTable'
    
    descSpatialReference = Describe(geocodedFeatures).spatialReference
    
    # Make a feature layer of geocodedFeatures using a where clause to restrict to those points
    # which have been located to an intersection, then add XY to it.
    MakeFeatureLayer_management(geocodedFeatures, featuresWithXY, geocodedWhereClause)
    CopyFeatures_management(featuresWithXY, geocodedLocXY)
    AddXY_management(geocodedLocXY)
    
    roadsAsFeatureLayer = 'ConflatedRoadsFeatureLayer'
    
    # Roads copied to memory.
    CopyFeatures_management(roadsToIntersect, intermediateRoadsToIntersect)
    
    MakeFeatureLayer_management(intermediateRoadsToIntersect, roadsAsFeatureLayer)
    
    # Use Point_X & Point_Y for the geolocated intersection location.
    # Use shape tokens for the x & y of the points which
    # result from intersecting the buffer & road geometries.    
    
    geocodedAccidentsList = list()
    singlePartOffsetAccidentsList = list()
    
    print "The path of the geocodedFeatures used is: " + geocodedFeatures
    
    #AddMessage("The field names used in the search cursor are:")
    #for fieldListItem in featuresWithXYFieldList:
    #    AddMessage(fieldListItem)
    
    accidentsCursor = SearchCursor(geocodedLocXY, featuresWithXYFieldList)
    
    for accidentRow in accidentsCursor:
        geocodedAccidentsList.append(accidentRow)
    
    try:
        del accidentsCursor
    except:
        pass
    
    print 'whereClauseFlag is: ' + str(whereClauseFlag)
    print 'Starting the offset process...'
    
    accCounter = -1
    
    env.outputCoordinateSystem = descSpatialReference
    
    if whereClauseFlag == True:
        
        # Don't need to create a relate or a join.
        # Just need to do a select on the would-be joined/related table
        # to get the SEGIDs, then use those to do a select
        # for the GCIDs the conflation roads.
        
        # Try using table to table here instead of copy features.
        # For some reason, arcpy doesn't like this table when it's in the
        # ar63 FGDBs.
        TableToTable_conversion(roadsAliasTable, 'in_memory', 'roadsAliasTable') # == intermediateRoadsAliasTable
        #CopyFeatures_management(roadsAliasTable, intermediateRoadsAliasTable)
        
        roadsAliasTableView = MakeTableView_management(intermediateRoadsAliasTable, 'roadsAliasTableView')
        
        for geocodedAccident in geocodedAccidentsList:
            accCounter += 1
            print 'Working on geocodedAccident #' + str(accCounter)
            # Create a point here with the x & y from the geocodedAccident,
            # add the coordinate system, OBJECTID, and AccidentID
            # from the geocodedAccident layer.
            # Then, create a buffer with it.
            
            #if geocodedAccident[2] is not None and geocodedAccident[3] is not None:
            tempPoint = Point(geocodedAccident[2], geocodedAccident[3])
            #print "\t " + str(tempPoint.X) + ", " + str(tempPoint.Y)
            tempPointGeometry = PointGeometry(tempPoint, descSpatialReference)
            accidentDistanceOffset = geocodedAccident[7]
            accidentClusterTolerance = 1
            
            
            try:
                #####################
                # Offsetting while using a WhereClause follows:
                #####################
                if accidentDistanceOffset is not None: # In Python it's None, whereas in an ArcGIS table it's <null>
                    
                    accidentDistanceOffset = int(accidentDistanceOffset)
                    
                    if accidentDistanceOffset != 0:
                        
                        Buffer_analysis(tempPointGeometry, intermediateAccidentBuffer, accidentDistanceOffset)
                        
                        firstRoadName = str(geocodedAccident[5])
                        if firstRoadName is not None:
                            firstRoadName = firstRoadName.upper()
                        else:
                            firstRoadName = 'NotAValidRoad'
                        
                        secondRoadName = str(geocodedAccident[8])
                        if secondRoadName is not None:
                            secondRoadName = secondRoadName.upper()
                        else:
                            secondRoadName = 'NotAValidRoad'
                        
                        thirdRoadName = ParseMatchAddr(geocodedAccident[9])
                        if thirdRoadName is not None:
                            thirdRoadName = thirdRoadName.upper()
                        else:
                            thirdRoadName = 'NotAValidRoad'
                        
                        roadNamesList = [firstRoadName, secondRoadName, thirdRoadName]
                        
                        aliasIDsList = getAliasIDs(roadNamesList, roadsAliasTableView)
                        
                        aliasIDsLength = len(aliasIDsList)
                        
                        if aliasIDsLength != 0:
                            aliasIDsString = """("""
                            for x in xrange(aliasIDsLength):
                                if (x != (aliasIDsLength - 1)):
                                    aliasIDsString += """'""" + aliasIDsList[x] + """',"""
                                else:
                                    aliasIDsString += """'""" + aliasIDsList[x] + """')"""
                            
                            streetWhereClause = (""" "RD" = '""" + firstRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + firstRoadName + """'""" + """ OR """ +
                                                """ "RD" = '""" + secondRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + secondRoadName + """'""" + """ OR """ +
                                                """ "RD" = '""" + thirdRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + thirdRoadName + """'""" +
                                                """ OR GCID IN """ + aliasIDsString)
                        else:
                            #Without the aliasIDs.
                            streetWhereClause = (""" "RD" = '""" + firstRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + firstRoadName + """'""" + """ OR """ +
                                                """ "RD" = '""" + secondRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + secondRoadName + """'""" + """ OR """ +
                                                """ "RD" = '""" + thirdRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + thirdRoadName + """'""")
                        
                        
                        SelectLayerByAttribute_management(roadsAsFeatureLayer, "NEW_SELECTION", streetWhereClause)
                        
                        selectionCount = str(int(GetCount_management(roadsAsFeatureLayer).getOutput(0)))
                        
                        if int(selectionCount) != 0:
                            featuresToIntersect = [roadsAsFeatureLayer, intermediateAccidentBuffer]
                            Intersect_analysis(featuresToIntersect, intermediateAccidentIntersect, "ALL", "", "POINT")
                            
                            if  int(str(GetCount_management(intermediateAccidentIntersect))) > 0:
                                MultipartToSinglepart_management(intermediateAccidentIntersect, intermediateAccidentIntersectSinglePart)
                                
                                singlePartsCursor = SearchCursor(intermediateAccidentIntersectSinglePart, ['SHAPE@XY'])
                                for singlePart in singlePartsCursor:
                                    singlePartListItem = [singlePart[0], geocodedAccident[2], geocodedAccident[3], geocodedAccident[4],
                                                               geocodedAccident[6], geocodedAccident[0]]
                                    singlePartOffsetAccidentsList.append(singlePartListItem)
                                
                                try:
                                    del singlePartsCursor
                                except:
                                    pass
                            else:
                                pass
                            try:
                                del intermediateAccidentIntersect
                            except:
                                pass
                        else:
                            pass
                            #print 'Zero road segments selected. Will not attempt to offset.'
                    else:
                        pass
                        #print 'AT_ROAD_DIST_FEET is 0. Will not attempt to offset.
                else:
                    pass
                    #print 'AT_ROAD_DIST_FEET is null. Will not attempt to offset.'
                
            except:
                # Need to log the warnings with details so that I know what's wrong with them.
                print "WARNING:"
                print "An error occurred which prevented the accident point with Acc_Key: " + str(geocodedAccident[4])
                print "from being buffered and/or offset properly."
                errorItem = sys.exc_info()[1]
                errorStatement = str(errorItem.args[0])
                print errorStatement
                
                try:
                    del errorItem
                except:
                    pass
    
    
    elif whereClauseFlag == False:
        for geocodedAccident in geocodedAccidentsList:
            
            # Create a point here with the x & y from the geocodedAccident,
            # add the coordinate system, OBJECTID, and AccidentID
            # from the geocodedAccident layer.
            # Then, create a buffer with it.
            
            #if geocodedAccident[2] is not None and geocodedAccident[3] is not None:
            tempPoint = Point(geocodedAccident[2], geocodedAccident[3])
            #print "\t " + str(tempPoint.X) + ", " + str(tempPoint.Y)
            tempPointGeometry = PointGeometry(tempPoint, descSpatialReference)
            accidentDistanceOffset = geocodedAccident[7]
            ##accidentClusterTolerance = 2
            
            try:
                #####################
                # Offsetting while not using a WhereClause follows:
                #####################
                
                if accidentDistanceOffset is not None:
                    if int(accidentDistanceOffset) != 0:
                        accidentDistanceOffset = int(accidentDistanceOffset)
                        
                        Buffer_analysis(tempPointGeometry, intermediateAccidentBuffer, accidentDistanceOffset)
                        
                        featuresToIntersect = [roadsAsFeatureLayer, intermediateAccidentBuffer]
                        Intersect_analysis(featuresToIntersect, intermediateAccidentIntersect, "ALL", "", "POINT")
                        if  int(str(GetCount_management(intermediateAccidentIntersect))) > 0:
                            MultipartToSinglepart_management(intermediateAccidentIntersect, intermediateAccidentIntersectSinglePart)
                            
                            singlePartsCursor = SearchCursor(intermediateAccidentIntersectSinglePart, ['SHAPE@XY'])
                            for singlePart in singlePartsCursor:
                                singlePartListItem = [singlePart[0], geocodedAccident[2], geocodedAccident[3], geocodedAccident[4],
                                                           geocodedAccident[6], geocodedAccident[0]]
                                singlePartOffsetAccidentsList.append(singlePartListItem)
                            
                            try:
                                del singlePartsCursor
                            except:
                                pass
                            try:
                                del intermediateAccidentIntersect
                            except:
                                pass
                        else:
                            pass
                    else:
                        pass
                        # Need to change this to being offset to the intersection, i.e. no movement, but
                        # considered to be correctly offset all the same.
                        #print 'AT_ROAD_DIST_FEET is 0. Will not attempt to offset.' 
                else:
                    pass
                    #print 'AT_ROAD_DIST_FEET is null. Will not attempt to offset.'
            except:
                print "WARNING:"
                print "An error occurred which prevented the accident point with Acc_Key: " + str(geocodedAccident[4])
                print "from being buffered and/or offset properly."
                errorItem = sys.exc_info()[1]
                errorStatement = str(errorItem.args[0])
                print errorStatement
                
                try:
                    del errorItem
                except:
                    pass
    
    
    else:
        print 'Please set the whereClauseFlag to either (boolean) True or False.'
        #pass
    
    offsetDictionaryByAccidentKey = dict()
    listContainer = list()
    
    # Group the rows by accident_key for further analysis,
    # and add them to the dictionary/list/list data structure.
    
    for singlePartOffsetItem in singlePartOffsetAccidentsList:
        if singlePartOffsetItem[3] in offsetDictionaryByAccidentKey.keys():
            listContainer = offsetDictionaryByAccidentKey[singlePartOffsetItem[3]]
            listContainer.append(singlePartOffsetItem)
            offsetDictionaryByAccidentKey[singlePartOffsetItem[3]] = listContainer
        else:
            listContainer = list()
            listContainer.append(singlePartOffsetItem)
            offsetDictionaryByAccidentKey[singlePartOffsetItem[3]] = listContainer
    
    updateListValues = list()
    
    for accidentKey in offsetDictionaryByAccidentKey.keys():
        # accidentKey will be a unique accident key from the table
        listContainer = offsetDictionaryByAccidentKey[accidentKey]
        updateList = [-1, -1, -1, "False"]
        try:
            # Get the AT_ROAD_KDOT_DIRECTION/AT_ROAD_DIRECTION from the first (0th) entry.
            directionToTest = listContainer[0][4] 
            if directionToTest is not None:
                directionToTest = str(directionToTest).upper()
                updateList = findTheMostInterestingRow(listContainer, directionToTest, maximumDegreesDifference)
                if updateList[0] != -1:
                    updateListValues.append(updateList)
                else:
                    # -1 is not a valid Acc_Key. Slight optimization for the next for loop that uses this list so that
                    # it doesn't have to be re-checked each time through the list for each accident in the table.
                    pass
            else:
                print 'Direction to test is null.'
        except:
            pass
    
    accidentUpdateCursorFields = ['ACCIDENT_KEY', 'Shape@XY', 'isOffset']
    
    accidentUpdateCursor = UpdateCursor(outputWithOffsetLocations, accidentUpdateCursorFields)
    for cursorItem in accidentUpdateCursor:
        for updateListItem in updateListValues:
            if cursorItem[0] == updateListItem[0]:
                if str(cursorItem[2]).upper() == 'TRUE':    # Don't make any changes if true.
                    AddMessage('The accident point with Acc_Key: ' + str(cursorItem[0]) + ' is already offset.')
                else:                                       # Otherwise, offset the point.
                    editableCursorItem = list(cursorItem)
                    #AddMessage('Found a matching cursorItem with an Accident_Key of ' + str(cursorItem[0]) + ".")
                    editableCursorItem[1] = (updateListItem[1], updateListItem[2])
                    editableCursorItem[2] = updateListItem[3]
                    #AddMessage(str(editableCursorItem))
                    accidentUpdateCursor.updateRow(editableCursorItem)
                    
            else:
                pass
Example #13
0
def main():
    """Create a 'skinny' copy of a feature class whose field names fit the
    10-character shapefile limit, plus a lookup table mapping old to new names.

    Script-tool parameters:
    0 -- input feature class
    1 -- output mapping table (PRENAME/PRECOUNT/NEWCOUNT/NEWNAME)
    2 -- output feature class with shortened fields
    """
    fc = GetParameterAsText(0)
    out_table = GetParameterAsText(1)
    out_fc = GetParameterAsText(2)

    # Start clean: remove any previous outputs.
    for thingy in [out_table, out_fc]:
        if Exists(thingy):
            Delete_management(thingy)

    # --------------set up reporting table for new field names-----------------
    field_name = "PRENAME"     # original field name
    field_count = "NEWCOUNT"   # longest value actually stored (text fields)
    schema_count = "PRECOUNT"  # declared length in the source schema
    new_name = "NEWNAME"       # shortened (<= 10 char) field name

    # See if the output table exists; create it or empty it.
    if not Exists(out_table):
        CreateTable_management(dirname(out_table), basename(out_table))
    else:
        DeleteRows_management(out_table)

    # See if fields already exist; if not, create them.
    if not fieldExists(out_table, field_name):
        AddField_management(out_table, field_name, "TEXT", "", "", 30)

    if not fieldExists(out_table, schema_count):
        AddField_management(out_table, schema_count, "LONG")

    if not fieldExists(out_table, field_count):
        AddField_management(out_table, field_count, "SHORT")

    if not fieldExists(out_table, new_name):
        AddField_management(out_table, new_name, "TEXT", "", "", 10)

    # Loop through all fields.
    all_fields = ListFields(fc)

    # Map each original field name to a unique name of at most 10 characters.
    name_dictionary = {}
    shortList = []  # tracks already-used short names to flag repeats

    for fn in all_fields:
        short_name = fn.name
        if len(fn.name) > 10:
            short_name = fn.name[0:10]

        # Make sure the shortened field name doesn't already exist.
        if short_name not in shortList:
            shortList.append(short_name)
            name_dictionary[fn.name] = short_name
        else:
            # Collision: append _0, _1, ... until the name is unique.
            i = 0
            while short_name in shortList and i < 100:
                short_name = short_name[0:7] + "_" + str(i)
                i += 1
            name_dictionary[fn.name] = short_name
            shortList.append(short_name)

    # -----next step, create new feature class & add all fields----------------
    # -----for text fields, make the length the proper length------------------

    desc = Describe(fc)
    geom_type = desc.shapeType
    SR = desc.spatialReference

    # Create the new feature class.
    CreateFeatureclass_management(dirname(out_fc), basename(out_fc), geom_type,
                                  "", "", "", SR)

    # Track number & date field names for type-aware copying later.
    numFields = []
    dateFields = []

    # Capture the name of the OID field while looping.
    oid = ""

    for f in all_fields:
        short_name = name_dictionary[f.name]
        data_type = f.type.upper()

        # Check to see if the data type is "normal".
        if data_type in [
                "TEXT", "FLOAT", "DOUBLE", "SHORT", "LONG", "DATE", "BLOB",
                "RASTER", "GUID", "STRING", "INTEGER", "SMALLINTEGER"
        ]:

            # Special track for string fields.
            if data_type in ["STRING", "TEXT"]:

                # Scan the data so the new field is exactly as wide as the
                # longest stored value (capped at 254 for shapefiles).
                i = 0
                with SearchCursor(fc, [f.name]) as rows:
                    for row in rows:
                        if row[0] is not None:
                            if len(row[0]) > i:
                                i = len(row[0])

                if i > 254:
                    i = 254

                # Insert the field name and the measured length into the
                # mapping table.
                cursor = InsertCursor(
                    out_table,
                    (field_name, field_count, schema_count, new_name))
                new_row = (f.name, i, f.length, short_name)
                cursor.insertRow(new_row)

                # BUGFIX: previously this also did 'del row, rows', which
                # raised a NameError when the source table had zero rows
                # ('row' was never bound by the loop).
                del cursor, new_row

                # Add the right-sized text field to the new feature class.
                AddField_management(out_fc, short_name, "TEXT", "", "", i)

            # Track for numbers, GUIDs & dates.
            else:
                AddField_management(out_fc, short_name, data_type)

                # Record numeric/date fields for special handling during copy.
                if data_type in [
                        "SHORT", "LONG", "INTEGER", "FLOAT", "DOUBLE"
                ]:
                    numFields.append(f.name)
                elif data_type in ["DATE"]:
                    dateFields.append(f.name)

                # Make sure all fields are in the translation table.
                cursor = InsertCursor(out_table, (field_name, new_name))
                new_row = (f.name, short_name)
                cursor.insertRow(new_row)
                del cursor, new_row

        elif data_type == "OID":
            AddField_management(out_fc, "LinkOID", "INTEGER")
            name_dictionary[f.name] = "LinkOID"  # add for field mapping
            oid = f.name

            # Add link field for object ID to the mapping table.
            cursor = InsertCursor(out_table, (field_name, new_name))
            new_row = (f.name, "LinkOID")
            cursor.insertRow(new_row)
            del cursor, new_row
        elif data_type == "GEOMETRY":
            pass
        else:
            print("Field " + f.name + " is type " + f.type +
                  ". It will not be copied over.")
            AddWarning("Field " + f.name + " is type " + f.type +
                       ". It will not be copied over.")
            del name_dictionary[f.name]

    # -----copy data into the new FC-------------------------------------------

    # Set up parallel field lists for the search & insert cursors.
    oldFields, newFields = [], []

    for field in name_dictionary.keys():
        oldFields.append(field)
        newFields.append(name_dictionary[field])

    # Set up a text-only (no geometry token) version of the fields.
    oldFieldsTextOnly = tuple(oldFields)
    newFieldsTextOnly = tuple(newFields)

    # Add SHAPE to the original set of fields.
    oldFields.append("SHAPE@")
    newFields.append("SHAPE@")

    # Convert the new field list to a tuple, safety first.
    newFields = tuple(newFields)  # this is the one with the shape field

    # Create lists of the indexes of number & date fields.
    numFieldsIndexList, dateFieldsIndexList = [], []
    for numF in numFields:
        numFieldsIndexList.append(oldFields.index(numF))
    for dateF in dateFields:
        dateFieldsIndexList.append(oldFields.index(dateF))

    # Minimum point counts per geometry type -- used to detect invalid
    # geometry (workaround for records that otherwise fail to copy).
    invalidDict = {"point": 1, "polyline": 2, "polygon": 3}

    # Set up reporting for records that didn't copy.
    didNotCopy = []

    # Fill new rows with old rows.
    with SearchCursor(fc, oldFields) as rows:
        for row in rows:
            geomIndex = oldFields.index("SHAPE@")
            geom = row[geomIndex]
            objectID = str(row[oldFields.index(oid)])
            try:

                try:
                    # Find the minimum number of required points.
                    minNum = invalidDict[geom_type.lower()]

                    # Get the count of points in the geometry.
                    count = geom.pointCount

                    # If the count is smaller than the minimum, the geometry
                    # is invalid: copy only the attribute fields.
                    if count < minNum:
                        wc = oid + " = " + objectID
                        userMessage("count smaller than min")
                        with SearchCursor(fc, oldFieldsTextOnly, wc) as rows2:
                            for row2 in rows2:
                                makeRow(out_fc, newFieldsTextOnly, row2,
                                        numFieldsIndexList,
                                        dateFieldsIndexList)
                        del row2, rows2, wc

                    else:
                        # Excellent, the record is normal & will copy.
                        makeRow(out_fc, newFields, row, numFieldsIndexList,
                                dateFieldsIndexList)

                except Exception as e:
                    userMessage(str(e))
                    # If we're in this area, it means the record has no
                    # geometry: copy only the attribute fields.
                    wc = oid + " = " + objectID
                    with SearchCursor(fc, oldFieldsTextOnly, wc) as rows2:
                        for row2 in rows2:
                            makeRow(out_fc, newFieldsTextOnly, row2,
                                    numFieldsIndexList, dateFieldsIndexList)
                    del row2, rows2, wc

            except Exception as e:
                userMessage(str(e))
                # For whatever reason, the record did not copy.
                userMessage("Error copying record ObjectID " + objectID)
                didNotCopy.append(objectID)

    if didNotCopy != []:
        userMessage("These records did not copy- %s %s" %
                    (oid, ", ".join(didNotCopy)))

    userMessage("Skinny shapefile complete.")
Example #14
0
import os

from arcpy import (AddField_management, env, CreateTable_management,
                   Delete_management, GetCount_management, ListFeatureClasses,
                   MakeFeatureLayer_management, MakeTableView_management,
                   SelectLayerByAttribute_management)
from arcpy.da import (InsertCursor, SearchCursor)  # @UnresolvedImport

# Use a SearchCursor to get all of the County abbreviations
# and County numbers from the Shared.Counties layer in SDEPROD.
cursorFields = ["COUNTY_ABBR", "COUNTY_NO", "COUNTY_NAME"]

# Open the cursor as a context manager so the underlying database
# connection/locks are released as soon as the rows are read, instead
# of lingering until the cursor object is garbage collected.
with SearchCursor(
        r'Database Connections\[email protected]\SHARED.COUNTIES',
        cursorFields) as coSCursor:
    # Each cursorItem is an (abbr, number, name) row tuple.
    coAbbrAndNoList = [cursorItem for cursorItem in coSCursor]

# Workspace that the extraction below reads from.
extractDataGDBPath = r'Database Connections\[email protected]'

# Accumulates one result row per county for the report insert.
reportResultsList = list()

insertCursorFields = [
    "Name", "TotalAccidents", "GcPercent", "GcNKPercent", "OfsPercent",
    "OfsNKPercent"
]


def iteratorprocess():
Example #15
0
    def rows(self):
        """Extract a feature's table for analysis.

        Pulls the GLOBALID, FACILITYID, edit-metadata, and (for feature
        classes) SHAPE fields out of the table behind ``self.full_path``.
        The edit-metadata field names come from describe-object
        attributes stored on ``self``. Each FACILITYID value is further
        broken apart into ``{"prefix": x, "str_id": y, "int_id": z}``.

        Returns
        -------
        tuple
            rows represented as dictionaries
        """

        metadata_fields = [self.creatorFieldName,
                           self.createdAtFieldName,
                           self.editorFieldName,
                           self.editedAtFieldName]
        fields = ['GLOBALID', 'FACILITYID'] + metadata_fields
        if self.datasetType == 'FeatureClass':
            fields.append('SHAPE@')

        records = []
        with SearchCursor(self.full_path, fields) as cursor:
            for values in cursor:
                records.append(dict(zip(fields, values)))

        # Transform each FACILITYID by splitting it into a prefix, the
        # id as a string, and the id as an integer.
        for record in records:
            if not record["FACILITYID"]:
                record["FACILITYID"] = {"prefix": "",
                                        "str_id": "",
                                        "int_id": None}
                continue

            facility_id = str(record["FACILITYID"])

            # The prefix is the leading run of non-digit characters;
            # re.match yields None when the id starts with a digit.
            prefix_match = re.match(r"^\D+", facility_id)
            prefix = prefix_match.group(0) if prefix_match else ""

            # The ID is everything that follows the prefix.
            id_as_str = facility_id[len(prefix):]
            try:
                id_as_int = int(id_as_str)
            except ValueError:
                # Non-numeric characters after the prefix: assume no ID.
                id_as_str = ""
                id_as_int = None

            record["FACILITYID"] = {"prefix": prefix,
                                    "str_id": id_as_str,
                                    "int_id": id_as_int}

        return tuple(records)
Example #16
0
    def create_aliases(self, routes):
        """Build the RoadAlias records for every route in *routes*.

        For each route, reads its single-part intermediate rows ordered
        by from-milepost and pairs each row with every alias defined in
        ``self._alias.alias_combination``.

        Parameters
        ----------
        routes : iterable
            Route-name values to select from the intermediate data.

        Returns
        -------
        list of RoadAlias
        """
        arcpy.AddMessage("creating road aliases")

        route_aliases = []

        for route in routes:
            where_clause = "{} = '{}'".format(
                arcpy.AddFieldDelimiters(
                    self._output._intermediate_singlepart_data,
                    self._fields.route_name), route)
            with SearchCursor(
                    in_table=self._output._intermediate_singlepart_data,
                    field_names=(self._fields.to_milepost,
                                 self._fields.from_milepost,
                                 self._fields.pre_dir, self._fields.suffix_dir,
                                 self._fields.street_name,
                                 self._fields.street_type,
                                 self._fields.address_grid, self._fields.alias,
                                 self._fields.alias_type, self._fields.alias2,
                                 self._fields.alias2_type,
                                 self._fields.acs_name,
                                 self._fields.acs_suffix,
                                 self._fields.county_name,
                                 self._fields.city_name),
                    where_clause=where_clause,
                    sql_clause=(None, "ORDER BY {}".format(
                        self._fields.from_milepost))) as cursor:
                for row in cursor:
                    to_milepost = self._set_milepost_value(row[0])
                    from_milepost = self._set_milepost_value(row[1])

                    # The alias key depends only on the row, not on the
                    # alias combination, so build it once per row instead
                    # of rebuilding it inside the alias loop (12 index
                    # lookups per iteration in the original).
                    alias_key = AliasKey(
                        row[self._get_index_from_key(self._fields.pre_dir)],
                        row[self._get_index_from_key(
                            self._fields.suffix_dir)],
                        row[self._get_index_from_key(
                            self._fields.street_type)],
                        row[self._get_index_from_key(
                            self._fields.street_name)],
                        row[self._get_index_from_key(self._fields.alias)],
                        row[self._get_index_from_key(
                            self._fields.alias_type)],
                        row[self._get_index_from_key(self._fields.alias2)],
                        row[self._get_index_from_key(
                            self._fields.alias2_type)],
                        row[self._get_index_from_key(self._fields.acs_name)],
                        row[self._get_index_from_key(
                            self._fields.acs_suffix)],
                        row[self._get_index_from_key(
                            self._fields.county_name)],
                        row[self._get_index_from_key(self._fields.city_name)])

                    # One RoadAlias per (row, alias-combination) pair.
                    for alias in self._alias.alias_combination.keys():
                        route_aliases.append(
                            RoadAlias(route, from_milepost, to_milepost,
                                      alias_key, alias))

        return route_aliases
def RoadNameRepair(optionsObject):

    # Adapted from a previous script created 2014-05-22
    # by Dirk Talley, which was called
    # Pre_Accident_Geocoding_Update_Cursor.py

    # This is an update cursor meant to clean up the road names in accident data.
    # It takes the road name information in the target feature set and reformats
    # it in the hope that the newly formatted data will have a higher match rate
    # when geocoded, without the introduction of any new errors.

    # For 2009 data in particular, it checks the information in the road database
    # and performs a check to see if there is a partial match on the 6-or-less
    # character road names with the information in the roads table for that
    # county. If it finds one, and only one, match, it changes the output road name
    # from the 6-or-less character name to the full (assumed) name.

    # If you get a "string or buffer expected" error message,
    # it is probably due to the script attempting a pattern match
    # on a None-type (<Null> in Arcmap) data entry in the table.
    # Make sure that you check the data for None-type entries.

    # The Fifth and Sixth string matching sections
    # no longer seem to take nearly as long as they
    # had previously. I ascribe this to the usage of
    # .da cursors and the "in_memory" workspace.

    try:
        # Set the environment
        env.workspace = "in_memory"

        # Set other variables
        uniqueRoadNamesTable = r"in_memory\UniqueRoadNames_Sorted"  ## Why not use the sorted table?
        uniqueRoadNamesTableFields = ["RD"]
        roadNamesList = list()
        accidentData = optionsObject.accidentDataTable

        try:
            oid_fieldname = Describe(accidentData).OIDFieldName
            print "The oid_fieldname is: " + oid_fieldname
        except:
            oid_fieldname = "ESRI_OID"

        if optionsObject.useKDOTFields == True:
            AddMessage('Checking KDOT Fields.')

            accidentCursorFields = [
                oid_fieldname, "COUNTY_NBR", "ON_ROAD_KDOT_NAME",
                "ON_ROAD_KDOT_TYPE", "ON_ROAD_KDOT_SFX_DIR",
                "AT_ROAD_KDOT_NAME", "AT_ROAD_KDOT_TYPE",
                "AT_ROAD_KDOT_SFX_DIR", "ON_AT_ROAD_KDOT_INTERSECT",
                "ACCIDENT_KEY"
            ]

        else:
            AddMessage('Checking LEO Fields.')

            accidentCursorFields = [
                oid_fieldname, "COUNTY_NBR", "ON_ROAD_NAME", "ON_ROAD_TYPE",
                "ON_ROAD_SUFFIX_DIRECTION", "AT_ROAD_NAME", "AT_ROAD_TYPE",
                "AT_ROAD_SUFFIX_DIRECTION", "ON_AT_ROAD_INTERSECT",
                "ACCIDENT_KEY"
            ]

        onRoadName = ""
        atRoadName = ""

        ## Should make a new table, not use the same one and update it.

        # Create a new search cursor to read in the data from
        # the uniqueRoadNames table.

        # Create a new search cursor to get road names.
        sCursor = SearchCursor(uniqueRoadNamesTable,
                               uniqueRoadNamesTableFields)

        # The table used for this cursor should come from
        # the CreateUniqueRoadNameTable function included
        # in this script.

        # If the base roads feature layer is updated, or otherwise changes
        # the uniqueRoadNamesTable will need to be run again.

        for sRow in sCursor:
            # Place the data into a 2-part list, with the pairs being County Number and Road Name as strings,
            # with County Number padded to 3 spaces with leading zeroes.
            #print "roadNamesListPart = " + str(sRow)
            roadNamesListPart = list(sRow)

            # Append the current county number and the current road name to the county number/road name list.
            roadNamesList.append(roadNamesListPart)

        try:
            del sCursor
        except:
            pass

        ####################################################
        # This script will now begin updates based on      #
        # eight patterns. The patterns are checked against #
        # data strings in the target accident data layer.  #
        # If it find matches, it attempts to make          #
        # updates and/or corrections to the data. If there #
        # is a problem with this script, please uncomment  #
        # the print statements to better watch the         #
        # program flow.                                    #
        ####################################################

        # Create the regex patterns to use in the next part,
        # with the update cursor.

        firstLetterMatchString = re.compile(r'C\\+\s+|C\\+|C/+\s+|C/+',
                                            re.IGNORECASE)
        firstNumberMatchString = re.compile(r'^C+[0-9]|^C+\s+[0-9]',
                                            re.IGNORECASE)
        secondMatchString = re.compile(r'^County Road [0-9]+/[ensw]',
                                       re.IGNORECASE)
        thirdMatchString = re.compile(r'[0-9]+[rnts][dht][a-z][a-z]',
                                      re.IGNORECASE)
        fourthMatchString = re.compile(r'[0-9]+[rnts][dht]/[ensw]',
                                       re.IGNORECASE)
        # Just a placeholder, the actual fifthMatchString pattern is generated
        # based on data retrieved within the accident table search cursor.
        fifthMatchString = re.compile(r'^WEST', re.IGNORECASE)
        # Just a placeholder, the actual sixthMatchString pattern is generated
        # based on data retrieved within the accident table search cursor.
        sixthMatchString = re.compile(r'^EAST', re.IGNORECASE)
        seventhMatchString = re.compile(r'^[0-9]+\s+[t][h]', re.IGNORECASE)
        eighthMatchString = re.compile(r'^[0-9]+[t][h][s]', re.IGNORECASE)
        atMatch = None
        orMatch = None

        accListDict = dict()

        # Create a new update cursor for the input feature class.

        # Use row[2], row.OR_TYPE, row[4]
        # or row[5], row[6], row[7]
        # to see if the names are already populating
        # the correct type and suffix fields for the
        # roads.

        # Replace all of these uCursors with an sCursor
        # then use a uCursor to update it
        # or use an iCursor to add them all back
        # into the table after truncating it.

        sCursor = SearchCursor(accidentData, accidentCursorFields)

        for sRow in sCursor:
            #print sRow[0]
            accListDict[sRow[0]] = list(sRow)

        try:
            del sCursor
        except:
            pass

        for accListItem in accListDict.values():
            #print str(roadListKey)

            # Perform updates here
            # Check the name of the roads and correct them.

            # Need to replace C\Q and C27 or (C 27) to County Road Q and County Road 27, respectively.
            # Erroneously encoded with a '\' rather than a '/' between the C and other
            # road name characters.

            if (accListItem[2] != None):
                orMatch = firstLetterMatchString.match(
                    accListItem[2])  # re.compile(r'C\\', re.IGNORECASE)
                if (orMatch != None):
                    ##AddMessage("Will replace the C\ in this OR_NAME2: " + str(accListItem[2]))
                    ##AddMessage(orMatch.end())
                    ##AddMessage("New OR_NAME2: COUNTY ROAD" + str(accListItem[2][orMatch.end():]))
                    accListItem[2] = "COUNTY ROAD " + accListItem[2][orMatch.
                                                                     end():]
                else:
                    pass
            else:
                pass

            if (accListItem[5] != None):
                atMatch = firstLetterMatchString.match(
                    accListItem[5])  # re.compile(r'C\\', re.IGNORECASE)
                if (atMatch != None):
                    ##AddMessage("Will replace the C\ in this AT_NAME2: " + str(accListItem[5]))
                    ##AddMessage(atMatch.end())
                    ##AddMessage("New AT_NAME2: COUNTY ROAD" + str(accListItem[5][atMatch.end():]))
                    accListItem[5] = "COUNTY ROAD " + accListItem[5][atMatch.
                                                                     end():]
                else:
                    pass
            else:
                pass

            accListDict[accListItem[0]] = accListItem

        for accListItem in accListDict.values():

            if (accListItem[2] != None):
                orMatch = firstNumberMatchString.match(
                    accListItem[2]
                )  # re.compile(r'^C[0-9]|^C+\s[0-9]', re.IGNORECASE)
                if (orMatch != None):
                    ##AddMessage("Will replace the C[0-9] or C+\s[0-9] in this OR_NAME2: " + str(accListItem[2]))
                    ##AddMessage(orMatch.end())
                    ##AddMessage("New OR_NAME2: COUNTY ROAD" + str(accListItem[2][orMatch.end():]))
                    accListItem[2] = "COUNTY ROAD " + accListItem[2][
                        orMatch.end() - 1:]
                else:
                    pass
            else:
                pass

            if (accListItem[5] != None):
                atMatch = firstNumberMatchString.match(
                    accListItem[5]
                )  # re.compile(r'^C[0-9]|^C+\s[0-9]', re.IGNORECASE)
                if (atMatch != None):
                    ##AddMessage("Will replace the C[0-9] or C+\s[0-9] in this AT_NAME2: " + str(accListItem[5]))
                    ##AddMessage(atMatch.end())
                    ##AddMessage("New AT_NAME2: COUNTY ROAD" + str(accListItem[5][atMatch.end():]))
                    accListItem[5] = "COUNTY ROAD " + accListItem[5][
                        atMatch.end() - 1:]
                else:
                    pass
            else:
                pass

            accListDict[accListItem[0]] = accListItem

            #print "After county name fix:"
            #print "accListDict[accListItem[0]]'s ON_ROAD_NAME & AT_ROAD_NAME  = " + str(accListDict[accListItem[0]][2]) + " & " + str(accListDict[accListItem[0]][5])

        AddMessage("####################################################")
        AddMessage("# End of First String Matching                     #")
        AddMessage("####################################################")

        for accListItem in accListDict.values():

            # Need to remove slashes, and if they have a
            # trailing directional make sure that it is
            # in the proper field.

            # Pattern matches one or more numbers, then
            # a forward slash, then a directional letter.
            if (accListItem[2] != None):
                orMatch = secondMatchString.match(
                    accListItem[2]
                )  # re.compile(r'^County Road [0-9]+/[ensw]', re.IGNORECASE)
                if (orMatch != None):
                    #print "Need to remove the slash and trailing directional from this OR_NAME2: ", accListItem[2]
                    #print "Match ended at: ", orMatch.end()
                    #print orMatch.group(0)[0:orMatch.end()-2] # The County Road without the slash and trailing directional -- Place this back in OR_NAME2
                    #print orMatch.group(0)[-2:-1] # The slash
                    #print orMatch.group(0)[-1:] # The trailing directional -- Check to see if this is the same as OR_SFX, if not, update OR_SFX
                    accListItem[2] = orMatch.group(0)[0:orMatch.end() - 2]

                    if (accListItem[4] != orMatch.group(0)[-1:]):
                        #print "OR_SFX does not match the trailing directional in OR_NAME2"
                        accListItem[4] = orMatch.group(0)[-1:]
                    else:
                        pass
                else:
                    pass
            else:
                pass

            if (accListItem[5] != None):
                atMatch = secondMatchString.match(
                    accListItem[5]
                )  # re.compile(r'^County Road [0-9]+/[ensw]', re.IGNORECASE)
                if (atMatch != None):
                    #print "Need to remove the slash and trailing directional from this AT_NAME2: ", accListItem[5]
                    #print "Match ended at: ", atMatch.end()
                    #print atMatch.group(0)[0:atMatch.end()-2] # The County Road without the slash and trailing directional -- Place this back in AT_NAME2
                    #print atMatch.group(0)[-2:-1] # The slash
                    #print atMatch.group(0)[-1:] # The trailing directional -- Check to see if this is the same as AT_SFX, if not, update AT_SFX
                    accListItem[5] = atMatch.group(0)[0:atMatch.end() - 2]
                    if (accListItem[7] != atMatch.group(0)[-1:]):
                        #print "AT_SFX does not match the trailing directional in AT_NAME2"
                        accListItem[7] = atMatch.group(0)[-1:]
                    else:
                        pass
                else:
                    pass
            else:
                pass

            accListDict[accListItem[0]] = accListItem

        AddMessage("####################################################")
        AddMessage("# End of Second String Matching                    #")
        AddMessage("####################################################")

        #print "At the end of third string matching, this is how the road names look:"

        atMatch = None
        orMatch = None

        for accListItem in accListDict.values():

            # Need to separate 2NDST, 14THST and similar ones.

            if (accListItem[2] != None):
                orMatch = thirdMatchString.match(
                    accListItem[2]
                )  # thirdMatchString = re.compile(r'[0-9]+[nts][dht][a-z][a-z]', re.IGNORECASE)
                if (orMatch != None):
                    #print "Need to change this from #NDST/#STST/#THST, to #ND/#ST/#TH and have ST in the OR_TYPE field: ", accListItem[2]
                    #print orMatch.end()
                    #print accListItem[2][0:orMatch.end()-2]
                    #print accListItem[2][-2:]
                    accListItem[2] = accListItem[2][0:orMatch.end() - 2]
                    if (accListItem[3] != orMatch.group(0)[-2:]):
                        #print "OR_TYPE does not match the TYPE erroneously concatenated in OR_NAME2"
                        #print "New OR_TYPE should be: ", accListItem[2][-2:]
                        accListItem[3] = orMatch.group(0)[-2:]
                    else:
                        pass
                else:
                    pass
            else:
                pass

            if (accListItem[5] != None):
                atMatch = thirdMatchString.match(
                    accListItem[5]
                )  # thirdMatchString = re.compile(r'[0-9]+[nts][dht][a-z][a-z]', re.IGNORECASE)
                if (atMatch != None):
                    #print "Need to change this from #NDST/#STST/#THST, to #ND/#ST/#TH and have ST in the AT_TYPE field: ", accListItem[5]
                    #print atMatch.end()
                    #print accListItem[5][0:atMatch.end()-2]
                    #print accListItem[5][-2:]
                    accListItem[5] = accListItem[5][0:atMatch.end() - 2]
                    if (accListItem[6] != atMatch.group(0)[-2:]):
                        #print "AT_TYPE does not match the TYPE erroneously concatenated in AT_NAME2"
                        #print "New AT_TYPE should be: ", accListItem[5][-2:]
                        accListItem[6] = atMatch.group(0)[-2:]
                    else:
                        pass
                else:
                    pass
            else:
                pass

            accListDict[accListItem[0]] = accListItem

            #print "ON_ROAD_NAME & AT_ROAD_NAME  = " + str(accListDict[accListItem[0]][2]) + " & " + str(accListDict[accListItem[0]][5])

        AddMessage("####################################################")
        AddMessage("# End of Third String Matching                     #")
        AddMessage("####################################################")

        atMatch = None
        orMatch = None

        for accListItem in accListDict.values():

            # Need to remove /S from 2ND/S, and similar.
            # Check to see if the trailing directional is in the proper field.
            # If not, update the field to be correct.

            if (accListItem[2] != None):
                orMatch = fourthMatchString.match(
                    accListItem[2]
                )  # fourthMatchString = re.compile(r'[0-9]+[nts][dht]/[ensw]', re.IGNORECASE)
                if (orMatch != None):
                    #print "Need to remove the slash and trailing directional from this OR_NAME2: ", accListItem[2]
                    #print "Match ended at: ", orMatch.end()
                    #print orMatch.group(0)[0:orMatch.end()-2] # The Street Name without the slash and trailing directional -- Place this back in OR_NAME2
                    #print orMatch.group(0)[-2:-1] # The slash
                    #print orMatch.group(0)[-1:] # The trailing directional -- Check to see if this is the same as OR_SFX, if not, update OR_SFX
                    accListItem[2] = orMatch.group(0)[0:orMatch.end() - 2]
                    if (accListItem[4] != orMatch.group(0)[-1:]):
                        #print "OR_SFX does not match the trailing directional in OR_NAME2"
                        accListItem[4] = orMatch.group(0)[-1:]
                    else:
                        pass
                else:
                    pass
            else:
                pass

            if (accListItem[5] != None):
                atMatch = fourthMatchString.match(
                    accListItem[5]
                )  # fourthMatchString = re.compile(r'[0-9]+[nts][dht]/[ensw]', re.IGNORECASE)
                if (atMatch != None):
                    #print "Need to remove the slash and trailing directional from this AT_NAME2: ", accListItem[5]
                    #print "Match ended at: ", atMatch.end()
                    #print atMatch.group(0)[0:atMatch.end()-2] # The Street Name without the slash and trailing directional -- Place this back in AT_NAME2
                    #print atMatch.group(0)[-2:-1] # The slash
                    #print atMatch.group(0)[-1:] # The trailing directional -- Check to see if this is the same as AT_SFX, if not, update AT_SFX
                    accListItem[5] = atMatch.group(0)[0:atMatch.end() - 2]
                    if (accListItem[7] != atMatch.group(0)[-1:]):
                        #print "AT_SFX does not match the trailing directional in AT_NAME2"
                        accListItem[7] = atMatch.group(0)[-1:]
                    else:
                        pass
                else:
                    pass
            else:
                pass

            accListDict[accListItem[0]] = accListItem

        AddMessage("####################################################")
        AddMessage("# End of Fourth String Matching                    #")
        AddMessage("####################################################")

        ### Fifth and Sixth String matching are more complex and
        ### will take more time to rebuild.

        ### But, I can probably remove some of the complexity
        ### by making sure that I'm only focused on one county
        ### at a time (Thus removing county checks)
        ### and by making sure that the years are selected for
        ### properly.

        atMatch = None
        orMatch = None

        for accListItem in accListDict.values():

            # If there are problems, try moving orMatch reinitialization here.
            # This cursor updates the on road name (ON_ROAD_NAME) for the
            # accident data if the data is from the year 2010 or before,
            # when the maximum field length for road names was increased from 6.

            if (accListItem[2] != None and
                (len(accListItem[2]) == 5 or len(accListItem[2]) == 6)):
                try:
                    accYear = accListItem[9][
                        0:4]  # Get the first 4 characters of the Accident_Key
                    accYear = int(accYear)  # Turn them into an integer.
                except:
                    accYear = 2000  # If there was a problem, assume the accident was from 2000.

                if (
                        accYear <= 2010
                ):  ## Replaced previous check with this check for accYears which are 2010 or less .

                    # The next line creates a regex pattern using the current row's AT_ROAD_NAME field
                    # as the pattern, ignoring case.

                    fifthMatchString = re.compile(
                        r'{}'.format(re.escape(accListItem[2])), re.IGNORECASE)
                    #print "This data about", accListItem[2], "is from", int(accListItem.YEAR)
                    roadMatchCounter = 0
                    for roadNamesItem in roadNamesList:
                        noSpacesRoadName = str(roadNamesItem[0]).replace(
                            ' ', '')
                        orMatch = fifthMatchString.match(noSpacesRoadName)
                        if (orMatch != None):
                            ##AddMessage( "Found a match for " + str(accListItem[2]) + "and " + str(roadNamesItem[0]) + ".")
                            roadMatchCounter += 1
                        else:
                            pass
                    # If there was only one match between the accident road name for that county and the
                    # unique road names for that county, replace the accident road name with the
                    # unique road name. -- Does another loop through the roadList to accomplish
                    # this. Probably not the most efficient way to do this, but it works.
                    if roadMatchCounter == 1:
                        #print "Exactly one road matched in this county. Road Matches: ", roadMatchCounter
                        for roadNamesItem in roadNamesList:
                            # Make sure that the length of the roadNamesItem's name is 6 or greater
                            # and that it is larger than the accListItem.
                            if len(roadNamesItem[0]) > 5 and len(
                                    roadNamesItem[0]) > len(accListItem[2]):
                                noSpacesRoadName = str(
                                    roadNamesItem[0]).replace(' ', '')
                                orMatch = fifthMatchString.match(
                                    noSpacesRoadName)
                                if (orMatch != None):
                                    AddMessage("Old on road name was: " +
                                               str(accListItem[2]))
                                    AddMessage(
                                        "New on road name will be corrected to: "
                                        + str(roadNamesItem[0]).upper())
                                    accListItem[2] = str(
                                        roadNamesItem[0]).upper()
                                else:
                                    pass
                            else:
                                pass

                    elif roadMatchCounter > 1:
                        AddMessage(
                            str(roadMatchCounter) +
                            " unique road names in this county matched the record's road name of: "
                            + str(accListItem[2]))
                        AddMessage(
                            "The record's road name will not be expanded.")
                        pass
                    else:
                        pass
                else:
                    pass
            else:
                pass

            accListDict[accListItem[0]] = accListItem

        AddMessage("####################################################")
        AddMessage("# End of Fifth String Matching                     #")
        AddMessage("####################################################")

        atMatch = None
        orMatch = None

        # Sixth matching pass: mirrors the fifth pass, but operates on the
        # at-road name (column 5) instead of the on-road name (column 2).
        for accListItem in accListDict.values():

            # If there are problems, try moving atMatch reinitialization here.
            # This cursor updates the at road name (AT_ROAD_NAME) for the
            # accident data if the data is from the year 2010 or before, when the
            # maximum field length for road names was increased from 6.

            # Only 5- or 6-character at-road names are candidates for
            # un-truncation (names at the old field-length limit).
            if (accListItem[5] != None and
                (len(accListItem[5]) == 5 or len(accListItem[5]) == 6)):
                try:
                    accYear = accListItem[9][
                        0:4]  # Get the first 4 characters of the Accident_Key
                    accYear = int(accYear)  # Turn them into an integer.
                except:
                    accYear = 2000  # If there was a problem, assume the accident was from 2000.

                if (
                        accYear <= 2010
                ):  ## Replaced previous check with this check for accYears which are 2010 or less.

                    # The next line creates a regex pattern using the current row's AT_ROAD_NAME field
                    # as the pattern, ignoring case.

                    sixthMatchString = re.compile(
                        r'{}'.format(re.escape(accListItem[5])), re.IGNORECASE)
                    #print "This data about", accListItem[5], "is from", int(accListItem.YEAR)
                    roadMatchCounter = 0
                    # First loop: only COUNT how many county road names start
                    # with the truncated name.
                    for roadNamesItem in roadNamesList:
                        # Removes all the spaces from the roadName, allowing
                        # for matching of UNIONC to UNION CHAPEL, LONETR to LONE TREE,
                        # TRICIT to TRI CITY, etc.
                        noSpacesRoadName = str(roadNamesItem[0]).replace(
                            ' ', '')
                        atMatch = sixthMatchString.match(noSpacesRoadName)
                        if (atMatch != None):
                            ##AddMessage("Found a match for " + str(accListItem[5]) +  " and "  + str(roadNamesItem[0]) + ".")
                            roadMatchCounter += 1
                        else:
                            pass
                    # If there was only one match between the accident road name for that county and the
                    # unique road names for that county, replace the accident road name with the
                    # unique road name. -- Does another loop through the roadList to accomplish
                    # this. Probably not the most efficient way to do this, but it works.
                    if roadMatchCounter == 1:
                        #print "Exactly one road matched in this county. Road Matches: ", roadMatchCounter
                        for roadNamesItem in roadNamesList:
                            # Make sure that the length of the roadNamesItem's name is 6 or greater
                            # and that it is larger than the accListItem.
                            if len(roadNamesItem[0]) > 5 and len(
                                    roadNamesItem[0]) > len(accListItem[5]):
                                noSpacesRoadName = str(
                                    roadNamesItem[0]).replace(' ', '')
                                atMatch = sixthMatchString.match(
                                    noSpacesRoadName)
                                if (atMatch != None):
                                    AddMessage("Old at road name was: " +
                                               str(accListItem[5]))
                                    AddMessage(
                                        "New at road name will be corrected to: "
                                        + str(roadNamesItem[0]).upper())
                                    # Replace the truncated name with the full
                                    # county road name, upper-cased.
                                    accListItem[5] = str(
                                        roadNamesItem[0]).upper()
                                else:
                                    pass
                            else:
                                pass

                    # Ambiguous (multiple county roads match): leave the
                    # truncated name untouched.
                    elif roadMatchCounter > 1:
                        AddMessage(
                            str(roadMatchCounter) +
                            " unique road names in this county matched the record's road name of: "
                            + str(accListItem[5]))
                        AddMessage(
                            "The record's road name will not be expanded.")
                        pass
                    else:
                        pass
                else:
                    pass
            else:
                pass

            # Store the (possibly updated) record back, keyed by its ID.
            accListDict[accListItem[0]] = accListItem

        AddMessage("####################################################")
        AddMessage("# End of Sixth String Matching                     #")
        AddMessage("####################################################")

        # Seventh matching pass: strip quote characters from both road-name
        # columns and collapse the space in ordinal names ("5 TH" -> "5TH").
        for accListItem in accListDict.values():

            # Remove the extra space in roads with names like "5 TH" and "17 TH".
            # Also remove single and double quotes.

            if (accListItem[2] != None):
                accListItem[2] = accListItem[2].replace(
                    "'", "")  # Remove Single Quotes
                accListItem[2] = accListItem[2].replace(
                    '"', '')  # Remove Double Quotes
                orMatch = seventhMatchString.match(
                    accListItem[2]
                )  # re.compile(r'^[0-9]+\s[t][h]', re.IGNORECASE)
                if (orMatch != None):
                    AddMessage(str(accListItem[2]) + " will be changed to:")
                    # Rebuild the matched text without the character just
                    # before "TH" (the space), e.g. "5 TH" -> "5TH".
                    # NOTE(review): only the MATCHED prefix is kept -- any
                    # text after the match is discarded; assumes the field
                    # holds only the road name itself -- confirm.
                    accListItem[2] = orMatch.group(0)[0:orMatch.end(
                    ) - 3] + orMatch.group(0)[orMatch.end() - 2:orMatch.end()]
                    AddMessage(str(accListItem[2]))
                else:
                    pass
            else:
                pass

            # Same treatment for the at-road name (column 5).
            if (accListItem[5] != None):
                accListItem[5] = accListItem[5].replace(
                    "'", "")  # Remove Single Quotes
                accListItem[5] = accListItem[5].replace(
                    '"', '')  # Remove Double Quotes
                atMatch = seventhMatchString.match(
                    accListItem[5]
                )  # re.compile(r'^[0-9]+\s[t][h]', re.IGNORECASE)
                if (atMatch != None):
                    AddMessage(str(accListItem[5]) + " will be changed to:")
                    # "17 TH" -> "17TH"; see NOTE(review) above about the
                    # unmatched remainder being dropped.
                    accListItem[5] = atMatch.group(0)[0:atMatch.end(
                    ) - 3] + atMatch.group(0)[atMatch.end() - 2:atMatch.end()]
                    AddMessage(str(accListItem[5]))
                else:
                    pass
            else:
                pass

            # Store the (possibly updated) record back, keyed by its ID.
            accListDict[accListItem[0]] = accListItem

        AddMessage("####################################################")
        AddMessage("# End of Seventh String Matching                   #")
        AddMessage("####################################################")

        # Eighth matching pass: drop the trailing "S" from ordinal names
        # written like "5THS"/"17THS" (pattern: digits + "THS").
        for accListItem in accListDict.values():

            # Remove the extra space in roads with names like "5 TH" and "17 TH".
            # Also remove single and double quotes.

            if (accListItem[2] != None):
                orMatch = eighthMatchString.match(
                    accListItem[2]
                )  # re.compile(r'^[0-9]+[t][h][s]', re.IGNORECASE)
                if (orMatch != None):
                    AddMessage(str(accListItem[2]) + " will be changed to:")
                    # Keep the match minus its final character (the "S"),
                    # e.g. "5THS" -> "5TH". NOTE(review): any text after the
                    # match is discarded, as in the seventh pass.
                    accListItem[2] = orMatch.group(0)[0:orMatch.end() - 1]
                    AddMessage(str(accListItem[2]))
                else:
                    pass
            else:
                pass

            # Same treatment for the at-road name (column 5).
            if (accListItem[5] != None):
                atMatch = eighthMatchString.match(
                    accListItem[5]
                )  # re.compile(r'^[0-9]+[t][h][s]', re.IGNORECASE)
                if (atMatch != None):
                    AddMessage(str(accListItem[5]) + " will be changed to:")
                    accListItem[5] = atMatch.group(0)[0:atMatch.end() - 1]
                    AddMessage(str(accListItem[5]))
                else:
                    pass
            else:
                pass

            # Store the (possibly updated) record back, keyed by its ID.
            accListDict[accListItem[0]] = accListItem

        AddMessage("####################################################")
        AddMessage("# End of Eighth String Matching                    #")
        AddMessage("####################################################")

        AddMessage("####################################################")
        AddMessage("# Rebuilding Intersection Names                    #")
        AddMessage("####################################################")

        for accListItem in accListDict.values():

            # Rebuild the intersection names in the form of:
            # onRoadName + " | " + atRoadName

            # Default both halves to "" so the concatenation below never
            # sees None.
            onRoadName = ""
            atRoadName = ""

            if accListItem[2] != None:
                onRoadName = str(accListItem[2])
            else:
                pass

            if accListItem[5] != None:
                atRoadName = str(accListItem[5])
            else:
                pass

            # NOTE(review): this condition is always True -- both variables
            # are initialized to "" above and can never be None here.
            if onRoadName != None and atRoadName != None:
                # Column 8 holds the rebuilt intersection name.
                accListItem[8] = str(onRoadName + " | " + atRoadName)
            else:
                pass

            # Store the updated record back, keyed by its ID.
            accListDict[accListItem[0]] = accListItem

        AddMessage("####################################################")
        AddMessage("# Intersection Names Rebuilt                       #")
        AddMessage("####################################################")

        ### Don't forget to add accident_key to the list of sCursor
        ### fields. Need it to properly select which accidents
        ### need their road names un-truncated.

        AddMessage("####################################################")
        AddMessage("# Applying Changes with an Update Cursor           #")
        AddMessage("####################################################")

        # Final step: write every (possibly modified) record in accListDict
        # back to the accident table in one update pass.
        uCursor = UpdateCursor(accidentData, accidentCursorFields)

        for uCursorRow in uCursor:
            try:
                # Rows are matched to dict entries by the key field in
                # column 0; a row with no matching entry (or any other
                # failure) is reported and skipped.
                accListItem = accListDict[uCursorRow[0]]
                uCursor.updateRow(accListItem)
            except:
                AddMessage("An error occured while updating.")
        # Release the cursor (and its lock on the data) as soon as the
        # update pass is finished.
        try:
            del uCursor
        except:
            pass

        AddMessage("####################################################")
        AddMessage("# Update completed.                                #")
        AddMessage("####################################################")

    # Catch-all for the whole processing run above. The Python 2 print
    # statement here shows this script targets Python 2 (ArcGIS 10.x arcpy).
    except Exception as newException:
        print str(newException)
        del newException

    finally:
        # Always release any cursors that may still be open so their locks
        # on the data are freed even after a mid-run failure; deleting a
        # name that was never bound raises NameError, hence the guards.
        try:
            del sCursor
        except:
            pass
        try:
            del uCursor
        except:
            pass