Example #1
def calculatePointElevationField(points, raster, field_name):

    #monitor progress by counting features
    view = MakeTableView_management(points, 'points')
    count = int(GetCount_management('points').getOutput(0))
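    # step progressor: message, then min and max range; advanced once per feature below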
    SetProgressor('step', 'Extracting point elevations', 0, count)
    AddMessage('{} features to process'.format(count))

    # Get the object id field
    oid = Describe(points).OIDFieldName

    # make an update cursor and update each row's elevation field
    cursor = UpdateCursor(points, [field_name, 'SHAPE@', oid])

    for row in cursor:
        row[0] = getElevationAtPoint(raster, row[1])
        cursor.updateRow(row)
        AddMessage('row updated to {}; oid: {}'.format(row[0], row[2]))
        SetProgressorPosition()

    # release the data
    del cursor

    #reset this progressor
    ResetProgressor()
Example #3
def geocodeParsedTable():
    setGlobals()
    table = r""  #define your table here
    streetField = ""  #define your street address field name here
    cityField = ""  #define your city field here
    stateField = ""  #define your state field here
    zipField = ""  #define your zip code field here

    latField = ""  #define your latitude field (the Y field). This field should already exist in the table.
    longField = ""  #define your longitude field (the X field). This field should already exist in the table.

    fields = (latField, longField, streetField, cityField, stateField,
              zipField)

    with UpdateCursor(table, fields) as rows:
        for row in rows:
            address = {
                'Street': row[2],
                'City': row[3],
                'State': row[4],
                'Zip': row[5]
            }  #creates a dictionary of the address
            xy = geoCodeAddress(address)
            if xy != None:
                row[0] = xy[1]  #sets latitude value
                row[1] = xy[0]  #sets longitude value
                rows.updateRow(row)  #updates the row
Example #4
def add_iso_attributes():
    """Append attributes from the original max stops data to the
    isochrones feature class, matching features stop id's field
    (which are in the 'stop_id' and 'name' fields
    """

    rail_stop_dict = dict()
    s_fields = [ID_FIELD, STOP_FIELD, ROUTES_FIELD, ZONE_FIELD, YEAR_FIELD]
    with SearchCursor(MAX_STOPS, s_fields) as s_cursor:
        sid_ix = s_cursor.fields.index(ID_FIELD)
        
        for row in s_cursor:
            stop_id = row[sid_ix]
            rail_stop_dict[stop_id] = list(row)

    # area value will be used to check for errors in isochrone creation
    iso_fields = [f.name for f in ListFields(ISOCHRONES)]
    area_field = 'area'
    if area_field not in iso_fields:
        f_type = 'DOUBLE'
        AddField(ISOCHRONES, area_field, f_type)
    
    area_val = 'SHAPE@AREA'
    u_fields = s_fields + [area_field, area_val]
    with UpdateCursor(ISOCHRONES, u_fields) as u_cursor:
        sid_ix = u_cursor.fields.index(ID_FIELD)
        val_ix = u_cursor.fields.index(area_val)
        
        for row in u_cursor:
            stop_id = row[sid_ix]
            area = row[val_ix]
            
            i_row = rail_stop_dict[stop_id]
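            # u_fields ends with [area_field, 'SHAPE@AREA'], so the area value
            # is appended twice to line up with both of those positions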
            i_row.extend([area, area])
            u_cursor.updateRow(i_row)
Example #5
 def post_to_table(self):
     table_fields = ["Installation", "rpsuid", "Feature_Type", "Status"]
     inst_f = AddFieldDelimiters(self.table, table_fields[0])
     site_f = AddFieldDelimiters(self.table, table_fields[1])
     ft_f = AddFieldDelimiters(self.table, table_fields[2])
     try:
         with Editor(os.path.split(self.table)[0]) as _:
             for inst in self.__layers:
                 for site in self.__layers[inst]:
                     for layer in self.__layers[inst][site]:
                         status = self.__layers[inst][site][layer]
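                         # update-or-insert: update any rows matching the where
                         # clause; if none matched, insert a new row below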
                         with UpdateCursor(self.table, table_fields[3], where_clause="{0}='{1}' AND {2}='{3}' AND {4}='{5}'".format(
                                 inst_f, str(inst), site_f, str(site), ft_f, layer)) as cursor:
                             row_count = 0
                             for row in cursor:
                                 row[0] = str(status)
                                 cursor.updateRow(row)
                                 row_count += 1
                             if not row_count:
                                 with InsertCursor(self.table, table_fields) as insert:
                                     insert.insertRow([str(inst), str(site), layer, str(status)])
         return True
     except Exception as e:
          self.log.exception(e)
         raise Exit("Failed from LayerStatus.post_to_table")
Example #6
    def __write_result_to_table(self, list_of_attributes):
        """

        :rtype: bool
        :type list_of_attributes: list of lists of attributes
        """
        layer_f = AddFieldDelimiters(self.out_table, "Layer")
        field_name_f = AddFieldDelimiters(self.out_table, "Field_Name")
        inst_f = AddFieldDelimiters(self.out_table, "installationID")
        with Editor(self.db) as _:
            try:
                for attributes in list_of_attributes:
                    with UpdateCursor(self.out_table,
                                      ["Layer", "Field_Name", "Quality", "Domain_Name", "QAP_Required",
                                       "installationID"],
                                      where_clause="{0}='{1}' AND {2}='{3}' AND {4}='{5}'".format(layer_f,
                                                                                                  str(attributes[0]),
                                                                                                  field_name_f,
                                                                                                  str(attributes[1]),
                                                                                                  inst_f,
                                                                                                  str(attributes[5]))) as cursor:
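                        # for/else: update the first matching row and break; the
                        # else branch runs only if the cursor yielded no rows,
                        # in which case the attributes are inserted as a new row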
                        for _ in cursor:
                            row = attributes
                            cursor.updateRow(row)
                            break
                        else:
                            with InsertCursor(self.out_table,
                                              ["Layer", "Field_Name", "Quality", "Domain_Name", "QAP_Required",
                                               "installationID"]) as c:
                                c.insertRow(attributes)
                return True
            except Exception as e:
                self.log.exception(e)
                raise Exit()
Example #7
def extract_attachments(att_table, out_folder, att_field='file_name'):

    fields = ['DATA', 'ATT_NAME', 'ATTACHMENTID', att_field]

    # check for existence of required fields
    has_fields = [f.name for f in ListFields(att_table)]
    for f in fields:
        if f not in has_fields:
            AddError('Field {} is required in attribute table'.format(f))

    # verify path
    verify_path_exists(out_folder)

    with UpdateCursor(att_table, fields) as cursor:
        for row in cursor:

            # get the attachment file and create a filename
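            # (row[0] is the attachment BLOB from the 'DATA' field; tobytes()
            # converts the memoryview to raw bytes for writing)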
            attachment = row[0]
            filename = 'ATT_{2}_{1}'.format(*row)

            # write the output file and update the row's value to the file name
            with open(join(out_folder, filename), 'wb') as f:
                f.write(attachment.tobytes())
            row[3] = filename
            cursor.updateRow(row)

            # cleanup
            del row
            del filename
            del attachment
Example #8
 def push(self):
     with Editor(self.db) as _:
         try:
             rpuid_f = AddFieldDelimiters(self.out_table, "rpuid")
             me_f = AddFieldDelimiters(self.out_table, "manual_entry")
             fields = ["SHAPE@", "rpsuid", "rpuid", "Feature_Type", "manual_entry", "installation", "username"]
             attributes = [self.shape, self.rpsuid, self.rpuid, self.ft, self.m_e, self.inst, self.username]
             with UpdateCursor(self.out_table, fields,
                               where_clause="{0}='{1}' AND {2}='{3}'".format(rpuid_f, str(attributes[2]),
                                                                             me_f, str(attributes[4]))) as CURSOR:
                 row_count = 0
                 for _ in CURSOR:
                     row_count += 1
                     if row_count == 1:
                         row = attributes
                         CURSOR.updateRow(row)
                     else:
                         # Deletes extra rows that match the SQL clause
                         CURSOR.deleteRow()
                 if not row_count:
                     with InsertCursor(self.out_table, fields) as c:
                         c.insertRow(attributes)
             return True
         except Exception as e:
             self.log.exception(e)
             raise Exit()
Example #9
def assign_max_zones():
    """Add an attribute to max stops that indicates which 'MAX Zone' it
    falls within, the max_zone feature class is used in conjunction with
    max stops to make this determination
    """

    # Create a mapping from zone object id's to their names
    max_zone_dict = dict()
    fields = ['OID@', UNIQUE_FIELD]
    with SearchCursor(MAX_ZONES, fields) as cursor:
        for oid, name in cursor:
            max_zone_dict[oid] = name

    # Find the nearest zone to each stop
    stop_zone_table = join(TEMP_DIR, 'stop_zone_near_table.dbf')
    GenerateNearTable(MAX_STOPS, MAX_ZONES, stop_zone_table)

    # Create a mapping from stop oid's to zone oid's
    stop2zone = dict()
    fields = ['IN_FID', 'NEAR_FID']
    with SearchCursor(stop_zone_table, fields) as cursor:
        for stop_oid, zone_oid in cursor:
            stop2zone[stop_oid] = zone_oid

    f_type = 'TEXT'
    AddField(MAX_STOPS, ZONE_FIELD, f_type)

    fields = ['OID@', ZONE_FIELD]
    with UpdateCursor(MAX_STOPS, fields) as cursor:
        for oid, zone in cursor:
            zone = max_zone_dict[stop2zone[oid]]

            cursor.updateRow((oid, zone))
Example #10
def update_layer(layer, field, value, new_value):
    if field and value:
        where_clause = "{} = '{}'".format(field, value)
    else:
        where_clause = None
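    # note: the field value is interpolated directly into the where clause, so
    # a value containing a single quote would need to be escaped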
    try:
        cursor = UpdateCursor(layer, field, where_clause)
    except (TypeError):
        return "Error loading table {}".format(layer)

    try:
        for row in cursor:
            row[0] = new_value
            cursor.updateRow(row)
    except (RuntimeError):
        del cursor
        return "Error modifying table {}".format(layer)

    return "Layer Updated: {}".format(layer)
Example #12
    def edit_version(self, connection_file: str):

        records = self._edit()
        if records:
            log.debug("Writing edited rows to a csv...")
            csv_file = f'.\\facilityid\\log\\{self.feature_name}_Edits.csv'
            write_to_csv(csv_file, records)

            self.add_edit_metadata()

            guid_facid = {x['GLOBALID']: x["NEWFACILITYID"] for x in records}
            if connection_file:
                edit_conn = os.path.join(connection_file, *self.tuple_path[1:])
                try:
                    # Start an arc edit session
                    log.debug("Entering an arc edit session...")
                    editor = Editor(connection_file)
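                    # startEditing(with_undo=False, multiuser_mode=True):
                    # no undo/redo stack, editing a versioned workspace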
                    editor.startEditing(False, True)
                    editor.startOperation()

                    log.debug("Filtering the table to editted records only...")
                    # Query only the entries that need editing
                    guids = ", ".join(f"'{x}'" for x in guid_facid.keys())
                    query = f"GLOBALID IN ({guids})"

                    # Open an update cursor and perform edits
                    log.debug("Opening an update cursor to perform edits...")
                    fields = ["GLOBALID", "FACILITYID"]
                    with UpdateCursor(edit_conn, fields, query) as cursor:
                        for row in cursor:
                            row[1] = guid_facid[row[0]]
                            cursor.updateRow(row)

                    # Stop the edit operation
                    log.debug("Closing the edit session...")
                    editor.stopOperation()
                    editor.stopEditing(True)
                    del editor
                    ClearWorkspaceCache_management()

                    log.info(("Successfully performed versioned edits on "
                              f"{self.feature_name}..."))
                    # Reset the aprx connection to the versioned connection
                    self.aprx_connection = edit_conn
                    self.version_name = os.path.splitext(
                        os.path.basename(connection_file))[0]
                    self.add_to_aprx()
                except RuntimeError:
                    log.exception(("Could not perform versioned edits "
                                   f"on {self.feature_name}..."))
            log.debug("Logging edits to csv file containing all edits ever...")
            all_edits = r'.\facilityid\log\AllEditsEver.csv'
            write_to_csv(all_edits, records)
        else:
            log.info("No edits were necessary...")
Example #13
    def extract_popup_info(self):
        """Extract data from the PopupInfo field to new fields."""

        # Make sure we have a feature class with a PopupInfo field.
        if not arcpy.Exists(self.fc):
            self.raise_error('Feature class {} not found.'.format(self.fc))
        popup_field_name = 'PopupInfo'
        if not self.field_exists(popup_field_name):
            self.raise_error('No {} field found in {}.'.format(popup_field_name, self.fc))

        # Find out what fields are in the popup.
        search_cursor = SearchCursor(self.fc, popup_field_name)
        first_row = next(search_cursor)
        sample_html = first_row[0]
        popup_dict = self.get_popup_dict(sample_html)
        popup_fields = list(popup_dict.keys())
        del first_row
        del search_cursor

        # Add new fields as necessary.
        fc_name = os.path.basename(self.fc)
        for field_name in popup_fields:
            if not self.field_exists(field_name):
                self.info('Adding field "{}" to {}.'.format(field_name, fc_name))
                try:
                    arcpy.management.AddField(self.fc, field_name, 'TEXT')
                except Exception as e:
                    self.raise_error('Failed to add field. {}'.format(e))

        # Populate the fields.
        self.info('Updating attributes.')
        update_fields = popup_fields + [popup_field_name]
        cursor = UpdateCursor(self.fc, update_fields)
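        # popup_field_name was appended last, so row[-1] below is the PopupInfo HTML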
        for row in cursor:
            popup_html = row[-1]
            popupdict = self.get_popup_dict(popup_html)
            for field_index, field_name in enumerate(popup_fields):
                row[field_index] = popupdict.get(field_name, None)
            cursor.updateRow(row)
        del cursor
Example #14
def geocodeSingleLineInputTable():
    setGlobals()
    table = r"" #define your table here
    SingleLineInputField = "" #define your single line input field here

    latField = "" #define your latitude field (the Y field). This field should already exist in the table.
    longField = "" #define your longitude field (the X field). This field should already exist in the table.
    fields = (latField, longField, SingleLineInputField)

    with UpdateCursor(table, fields) as rows:
        for row in rows:
            address = {'SingleLine':row[2]}
            xy = geoCodeAddress(address)
            if xy != None:
                row[0] = xy[1] #sets latitude value
                row[1] = xy[0] #sets longitude value
                rows.updateRow(row) #updates the row
Example #15
def add_name_field():
    """Only a field called 'name' will be retained when locations are
    loaded into a service area analysis, as the MAX stops will be.  
    This field is populated that field with unique identifiers so that
    the other attributes from this data can be linked to the network 
    analyst output
    """

    fields = [f.name for f in ListFields(MAX_STOPS)]

    if UNIQUE_FIELD not in fields:
        f_type = 'LONG'
        AddField(MAX_STOPS, UNIQUE_FIELD, f_type)
    
        u_fields = [ID_FIELD, UNIQUE_FIELD]
        with UpdateCursor(MAX_STOPS, u_fields) as cursor:
            for stop_id, name in cursor:
                name = stop_id
                cursor.updateRow((stop_id, name))
Example #16
def _trim_shoreline(islands):
    """
    Trim the shoreline of micro-islands. This makes 
    permanent changes to the Shapefile.
    """
    for island in islands:
        path = path_to_shoreline(island)
        pair = max(TableToNumPyArray(path, ["OID@", "SHAPE@AREA"]),
                   key=lambda p: p[1])
        with UpdateCursor(path, ["OID@", "SHAPE@"]) as cursor:
            for row in cursor:
                if row[0] != pair[0]:
                    cursor.deleteRow()
                else:
                    row_new = Array()
                    for part in row[1]:
                        part_new = Array()
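                        # a None point in an arcpy part array separates the
                        # exterior ring from interior rings (holes); stopping
                        # at the first None keeps only the outer ring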
                        for point in part:
                            if point is None:
                                break
                            part_new.add(point)
                        row_new.add(part_new)
                    row[1] = Polygon(row_new)
                    cursor.updateRow(row)
Example #17
def add_inception_year():
    """Each MAX line has a decision to build year, add that information
    as an attribute to the max stops.  If a max stop serves multiple
    lines the year from the oldest line will be assigned.
    """

    f_type = 'LONG'
    AddField(MAX_STOPS, YEAR_FIELD, f_type)

    # Note that 'MAX Year' for stops within the CBD are variable as
    # stops within that region were not all built at the same time
    # (this is not the case for all other MAX zones)
    fields = [ID_FIELD, DESC_FIELD, ZONE_FIELD, YEAR_FIELD]
    with UpdateCursor(MAX_STOPS, fields) as cursor:
        for stop_id, rte_desc, zone, year in cursor:
            if 'MAX Blue Line' in rte_desc \
                    and zone not in ('West Suburbs', 'Southwest Portland'):
                year = 1980
            elif 'MAX Blue Line' in rte_desc:
                year = 1990
            elif 'MAX Red Line' in rte_desc:
                year = 1997
            elif 'MAX Yellow Line' in rte_desc \
                    and zone != 'Central Business District':
                year = 1999
            elif 'MAX Green Line' in rte_desc:
                year = 2003
            elif 'MAX Orange Line' in rte_desc:
                year = 2008
            else:
                print('Stop {} not assigned a MAX Year, cannot proceed '
                      'without this assignment, examine code/data for '
                      'errors'.format(stop_id))
                exit()

            cursor.updateRow((stop_id, rte_desc, zone, year))
Example #18
def RoadNameRepair(optionsObject):
    
    # Adapted from a previous script created 2014-05-22
    # by Dirk Talley, which was called
    # Pre_Accident_Geocoding_Update_Cursor.py
    
    # This is an update cursor meant to clean up the road names in accident data.
    # It takes the road name information in the target feature set and reformats
    # it in the hope that the newly formatted data will have a higher match rate
    # when geocoded, without the introduction of any new errors.
    
    # For 2009 data in particular, it checks the information in the road database
    # and performs a check to see if there is a partial match on the 6-or-less
    # character road names with the information in the roads table for that
    # county. If it finds one, and only one, match, it changes the output road name
    # from the 6-or-less character name to the full (assumed) name.
    
    # If you get a "string or buffer expected" error message,
    # it is probably due to the script attempting a pattern match
    # on a None-type (<Null> in Arcmap) data entry in the table.
    # Make sure that you check the data for None-type entries.
    
    # The Fifth and Sixth string matching sections 
    # no longer seem to take nearly as long as they
    # had previously. I ascribe this to the usage of
    # .da cursors and the "in_memory" workspace.
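    # Illustrative examples of the corrections below (drawn from the regex
    # patterns used in this function): 'C\27' expands to 'County Road 27',
    # '14THST' becomes name '14TH' with type 'ST', and '2ND/S' becomes '2ND'
    # with the trailing directional 'S' moved to the suffix field.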
    
    try:
        # Set the environment
        env.workspace = "in_memory"
        
        # Set other variables
        uniqueRoadNamesTable = r"in_memory\UniqueRoadNames"
        uniqueRoadNamesTableFields = ["RD"]
        roadNamesList = list()
        accidentData = optionsObject.accidentDataTable 
        
        if optionsObject.useKDOTFields:
            AddMessage('Using KDOT Fields.')
            accidentCursorFields = ["ESRI_OID", "COUNTY_NBR", "ON_ROAD_KDOT_NAME", "ON_ROAD_KDOT_TYPE", "ON_ROAD_KDOT_SFX_DIR",
                                    "AT_ROAD_KDOT_NAME", "AT_ROAD_KDOT_TYPE", "AT_ROAD_KDOT_SFX_DIR", "ON_AT_ROAD_KDOT_INTERSECT",
                                    "ACCIDENT_KEY"]
        else:
            accidentCursorFields = ["ESRI_OID", "COUNTY_NBR", "ON_ROAD_NAME", "ON_ROAD_TYPE", "ON_ROAD_SUFFIX_DIRECTION",
                                    "AT_ROAD_NAME", "AT_ROAD_TYPE", "AT_ROAD_SUFFIX_DIRECTION", "ON_AT_ROAD_INTERSECT",
                                    "ACCIDENT_KEY"]
        
        #accidentCursorFields = ["ESRI_OID", "COUNTY_NBR", "ON_ROAD_NAME", "ON_ROAD_TYPE", "ON_ROAD_SUFFIX_DIRECTION",
        #                        "AT_ROAD_NAME", "AT_ROAD_TYPE", "AT_ROAD_SUFFIX_DIRECTION", "ON_AT_ROAD_INTERSECT",
        #                        "ACCIDENT_KEY"]
        
        onRoadName = ""
        atRoadName = ""
        
        ## Should make a new table, not use the same one and update it.
        
        # Create a new search cursor to read in the data from
        # the uniqueRoadNames table.
        
        # Create a new search cursor to get road names.
        sCursor = SearchCursor(uniqueRoadNamesTable, uniqueRoadNamesTableFields)
        
        # The table used for this cursor should come from
        # the CreateUniqueRoadNameTable function included
        # in this script.
        
        # If the base roads feature layer is updated, or otherwise changes
        # the uniqueRoadNamesTable will need to be run again.
        
        for sRow in sCursor:
            # Place the data into a 2-part list, with the pairs being County Number and Road Name as strings,
            # with County Number padded to 3 spaces with leading zeroes.
            #print "roadNamesListPart = " + str(sRow)
            roadNamesListPart = list(sRow)
            
            # Append the current county number and the current road name to the county number/road name list.
            roadNamesList.append(roadNamesListPart)
        
        try:
            del sCursor
        except:
            pass
        
        ####################################################
        # This script will now begin updates based on      #
        # seven patterns. The patterns are checked against #
        # data strings in the target accident data layer.  #
        # If it find matches, it attempts to make          #
        # updates and/or corrections to the data. If there #
        # is a problem with this script, please uncomment  #
        # the print statements to better watch the         #
        # program flow.                                    #
        ####################################################
        
        # Create the regex patterns to use in the next part,
        # with the update cursor.
        
        firstMatchString = re.compile(r'C\\', re.IGNORECASE)
        secondMatchString = re.compile(r'^County Road [0-9]+/[ensw]', re.IGNORECASE)
        thirdMatchString = re.compile(r'[0-9]+[rnts][dht][a-z][a-z]', re.IGNORECASE)
        fourthMatchString = re.compile(r'[0-9]+[rnts][dht]/[ensw]', re.IGNORECASE)
        # Just a placeholder, the actual fifthMatchString pattern is generated
        # based on data retrieved within the accident table search cursor.
        fifthMatchString = re.compile(r'^WEST', re.IGNORECASE)
        # Just a placeholder, the actual sixthMatchString pattern is generated
        # based on data retrieved within the accident table search cursor.
        sixthMatchString = re.compile(r'^EAST', re.IGNORECASE)
        seventhMatchString = re.compile(r'^[0-9]+\s[t][h]', re.IGNORECASE)
        atMatch = None
        orMatch = None
        
        accListDict = dict()
        
        # Create a new update cursor for the input feature class.
        
        # Use row[2], row.OR_TYPE, row[4]
        # or row[5], row[6], row[7]
        # to see if the names are already populating
        # the correct type and suffix fields for the
        # roads.
        
        # Replace all of these uCursors with an sCursor
        # then use a uCursor to update it
        # or use an iCursor to add them all back
        # into the table after truncating it.
        
        
        sCursor = SearchCursor(accidentData, accidentCursorFields)
        
        for sRow in sCursor:
            #print sRow[0]
            accListDict[sRow[0]] = list(sRow)
        
        try:
            del sCursor
        except:
            pass
        
        
        for accListItem in accListDict.values():
            #print str(roadListKey)
            
            # Perform updates here
            # Check the name of the roads and correct them.
            
            # Need to expand C\Q and C\27 to County Road Q and County Road 27, respectively.
            # Erroneously encoded with a '\' rather than a '/' between the C and other
            # road name characters.
            
            if (accListItem[2] != None):
                orMatch = firstMatchString.match(accListItem[2]) # re.compile(r'C\\', re.IGNORECASE)
                if (orMatch != None):
                    #print "Need to expand the C\ in this OR_NAME2: ", accListItem[2]
                    #print orMatch.end()
                    #print "County Road" + accListItem[2][orMatch.end():]
                    accListItem[2] = "County Road " + accListItem[2][orMatch.end():]
                else:
                    pass
            else:
                pass
            
            if (accListItem[5] != None):
                atMatch = firstMatchString.match(accListItem[5]) # re.compile(r'C\\', re.IGNORECASE)
                if (atMatch != None):
                    #print "Need to expand the C\ in this AT_NAME2: ", accListItem[5]
                    #print atMatch.end()
                    #print "County Road" + accListItem[5][atMatch.end():]
                    accListItem[5] = "County Road " +accListItem[5][atMatch.end():]
                else:
                    pass
            else:
                pass
            
            
            accListDict[accListItem[0]] = accListItem
            
            #print "After county name fix:"
            #print "accListDict[accListItem[0]]'s ON_ROAD_NAME & AT_ROAD_NAME  = " + str(accListDict[accListItem[0]][2]) + " & " + str(accListDict[accListItem[0]][5])
        
        
        print "####################################################"
        print "# End of First String Matching                     #"
        print "####################################################"
        
        
        
        for accListItem in accListDict.values():
            
            # Need to remove slashes, and if they have a
            # trailing directional make sure that it is
            # in the proper field.
            
            # Pattern matches one or more numbers, then
            # a forward slash, then a directional letter.
            if (accListItem[2] != None):
                orMatch = secondMatchString.match(accListItem[2]) # re.compile(r'^County Road [0-9]+/[ensw]', re.IGNORECASE)
                if (orMatch != None):
                    #print "Need to remove the slash and trailing directional from this OR_NAME2: ", accListItem[2]
                    #print "Match ended at: ", orMatch.end()
                    #print orMatch.group(0)[0:orMatch.end()-2] # The County Road without the slash and trailing directional -- Place this back in OR_NAME2
                    #print orMatch.group(0)[-2:-1] # The slash
                    #print orMatch.group(0)[-1:] # The trailing directional -- Check to see if this is the same as OR_SFX, if not, update OR_SFX
                    accListItem[2] = orMatch.group(0)[0:orMatch.end()-2]
                    
                    if (accListItem[4] != orMatch.group(0)[-1:]):
                        #print "OR_SFX does not match the trailing directional in OR_NAME2"
                        accListItem[4] = orMatch.group(0)[-1:]
                    else:
                        pass
                else:
                    pass
            else:
                pass
            
            if (accListItem[5] != None):
                atMatch = secondMatchString.match(accListItem[5]) # re.compile(r'^County Road [0-9]+/[ensw]', re.IGNORECASE)
                if (atMatch != None):
                    #print "Need to remove the slash and trailing directional from this AT_NAME2: ", accListItem[5]
                    #print "Match ended at: ", atMatch.end()
                    #print atMatch.group(0)[0:atMatch.end()-2] # The County Road without the slash and trailing directional -- Place this back in AT_NAME2
                    #print atMatch.group(0)[-2:-1] # The slash
                    #print atMatch.group(0)[-1:] # The trailing directional -- Check to see if this is the same as AT_SFX, if not, update AT_SFX
                    accListItem[5] = atMatch.group(0)[0:atMatch.end()-2]
                    if (accListItem[7] != atMatch.group(0)[-1:]):
                        #print "AT_SFX does not match the trailing directional in AT_NAME2"
                        accListItem[7] = atMatch.group(0)[-1:]
                    else:
                        pass
                else:
                    pass
            else:
                pass
            
            accListDict[accListItem[0]] = accListItem
        
        
        print "####################################################"
        print "# End of Second String Matching                    #"
        print "####################################################"
        
        #print "At the end of third string matching, this is how the road names look:"
        
        atMatch = None
        orMatch = None
        
        for accListItem in accListDict.values():
            
            # Need to separate 2NDST, 14THST and similar ones.
            
            if (accListItem[2] != None):
                orMatch = thirdMatchString.match(accListItem[2]) # thirdMatchString = re.compile(r'[0-9]+[nts][dht][a-z][a-z]', re.IGNORECASE)
                if (orMatch != None):
                    #print "Need to change this from #NDST/#STST/#THST, to #ND/#ST/#TH and have ST in the OR_TYPE field: ", accListItem[2]
                    #print orMatch.end()
                    #print accListItem[2][0:orMatch.end()-2]
                    #print accListItem[2][-2:]
                    accListItem[2] = accListItem[2][0:orMatch.end()-2]
                    if (accListItem[3] != orMatch.group(0)[-2:]):
                        #print "OR_TYPE does not match the TYPE erroneously concatenated in OR_NAME2"
                        #print "New OR_TYPE should be: ", accListItem[2][-2:]
                        accListItem[3] = orMatch.group(0)[-2:]
                    else:
                        pass
                else:
                    pass
            else:
                pass
            
            if (accListItem[5] != None):
                atMatch = thirdMatchString.match(accListItem[5]) # thirdMatchString = re.compile(r'[0-9]+[nts][dht][a-z][a-z]', re.IGNORECASE)
                if (atMatch != None):
                    #print "Need to change this from #NDST/#STST/#THST, to #ND/#ST/#TH and have ST in the AT_TYPE field: ", accListItem[5]
                    #print atMatch.end()
                    #print accListItem[5][0:atMatch.end()-2]
                    #print accListItem[5][-2:]
                    accListItem[5] = accListItem[5][0:atMatch.end()-2]
                    if (accListItem[6] != atMatch.group(0)[-2:]):
                        #print "AT_TYPE does not match the TYPE erroneously concatenated in AT_NAME2"
                        #print "New AT_TYPE should be: ", accListItem[5][-2:]
                        accListItem[6] = atMatch.group(0)[-2:]
                    else:
                        pass
                else:
                    pass
            else:
                pass
            
            accListDict[accListItem[0]] = accListItem
            
            #print "ON_ROAD_NAME & AT_ROAD_NAME  = " + str(accListDict[accListItem[0]][2]) + " & " + str(accListDict[accListItem[0]][5])
        
        print "####################################################"
        print "# End of Third String Matching                     #"
        print "####################################################"
        
        atMatch = None
        orMatch = None
        
        for accListItem in accListDict.values():
        
            # Need to remove /S from 2ND/S, and similar.
            # Check to see if the trailing directional is in the proper field.
            # If not, update the field to be correct.
            
            if (accListItem[2] != None):
                orMatch = fourthMatchString.match(accListItem[2]) # fourthMatchString = re.compile(r'[0-9]+[nts][dht]/[ensw]', re.IGNORECASE)
                if (orMatch != None):
                    #print "Need to remove the slash and trailing directional from this OR_NAME2: ", accListItem[2]
                    #print "Match ended at: ", orMatch.end()
                    #print orMatch.group(0)[0:orMatch.end()-2] # The Street Name without the slash and trailing directional -- Place this back in OR_NAME2
                    #print orMatch.group(0)[-2:-1] # The slash
                    #print orMatch.group(0)[-1:] # The trailing directional -- Check to see if this is the same as OR_SFX, if not, update OR_SFX
                    accListItem[2] = orMatch.group(0)[0:orMatch.end()-2]
                    if (accListItem[4] != orMatch.group(0)[-1:]):
                        #print "OR_SFX does not match the trailing directional in OR_NAME2"
                        accListItem[4] = orMatch.group(0)[-1:]
                    else:
                        pass
                else:
                    pass
            else:
                pass
        
            if (accListItem[5] != None):
                atMatch = fourthMatchString.match(accListItem[5]) # fourthMatchString = re.compile(r'[0-9]+[nts][dht]/[ensw]', re.IGNORECASE)
                if (atMatch != None):
                    #print "Need to remove the slash and trailing directional from this AT_NAME2: ", accListItem[5]
                    #print "Match ended at: ", atMatch.end()
                    #print atMatch.group(0)[0:atMatch.end()-2] # The Street Name without the slash and trailing directional -- Place this back in AT_NAME2
                    #print atMatch.group(0)[-2:-1] # The slash
                    #print atMatch.group(0)[-1:] # The trailing directional -- Check to see if this is the same as AT_SFX, if not, update AT_SFX
                    accListItem[5] = atMatch.group(0)[0:atMatch.end()-2]
                    if (accListItem[7] != atMatch.group(0)[-1:]):
                        #print "AT_SFX does not match the trailing directional in AT_NAME2"
                        accListItem[7] = atMatch.group(0)[-1:]
                    else:
                        pass
                else:
                    pass
            else:
                pass
        
            accListDict[accListItem[0]] = accListItem
        
        
        print "####################################################"
        print "# End of Fourth String Matching                    #"
        print "####################################################"
        
        ### Fifth and Sixth String matching are more complex and
        ### will take more time to rebuild.
        
        ### But, I can probably remove some of the complexity
        ### by making sure that I'm only focused on one county
        ### at a time (Thus removing county checks)
        ### and by making sure that the years are selected for
        ### properly.
        
        atMatch = None
        orMatch = None
        
        for accListItem in accListDict.values():
          
            # If there are problems, try moving orMatch reinitialization here.
            # This cursor updates the on road name (ON_ROAD_NAME) for the
            # accident data if the data is from the year 2009 or before,
            # when the maximum field length for road names was only 6.
            
            if (accListItem[2] != None and (len(accListItem[2]) == 5 or len(accListItem[2]) == 6)):
                try:
                    accYear = accListItem[9][0:4] # Get the first 4 characters of the Accident_Key
                    accYear = int(accYear) # Turn them into an integer.
                except:
                    accYear = 2000 # If there was a problem, assume the accident was from 2000.
                
                if (accYear <= 2009): # Only fix truncated names for accident years 2009 and earlier.
                    
                    # The next line creates a regex pattern using the current row's AT_ROAD_NAME field
                    # as the pattern, ignoring case.
                    
                    fifthMatchString = re.compile(r'{}'.format(re.escape(accListItem[2])), re.IGNORECASE)
                    #print "This data about", accListItem[2], "is from", int(accListItem.YEAR)
                    roadMatchCounter = 0
                    for roadNamesItem in roadNamesList:
                        noSpacesRoadName = str(roadNamesItem[0]).replace(' ', '')
                        orMatch = fifthMatchString.match(noSpacesRoadName)
                        if (orMatch != None):
                            #print "Found a match for", accListItem[2], "and", roadNamesItem[0]
                            roadMatchCounter += 1
                        else:
                            pass
                    # If there was only one match between the accident road name for that county and the
                    # unique road names for that county, replace the accident road name with the
                    # unique road name. -- Does another loop through the roadList to accomplish
                    # this. Probably not the most efficient way to do this, but it works.
                    if roadMatchCounter == 1:
                        #print "Exactly one road matched in this county. Road Matches: ", roadMatchCounter
                        for roadNamesItem in roadNamesList:        
                            # Make sure that the length of the roadNamesItem's name is 6 or greater
                            # and that it is larger than the accListItem.
                            if len(roadNamesItem[0]) > 5 and len(roadNamesItem[0]) > len(accListItem[2]):
                                noSpacesRoadName = str(roadNamesItem[0]).replace(' ', '')
                                orMatch = fifthMatchString.match(noSpacesRoadName)
                                if (orMatch != None):
                                    AddMessage("Old on road name was: " + str(accListItem[2]))
                                    AddMessage("New on road name will be corrected to: " + str(roadNamesItem[0]).upper())
                                    accListItem[2] = str(roadNamesItem[0]).upper()
                                else:
                                    pass
                            else:
                                pass
                            
                    elif roadMatchCounter > 1:
                        #print "More than one road matched in this county. Road Matches: ", roadMatchCounter
                        pass
                    else:
                        pass
                else:
                    pass
            else:
                pass
            
            accListDict[accListItem[0]] = accListItem
        
        print "####################################################"
        print "# End of Fifth String Matching                     #"
        print "####################################################"
        
        atMatch = None
        orMatch = None
        
        for accListItem in accListDict.values():
          
            # If there are problems, try moving atMatch reinitialization here.
            # This cursor updates the at road name (AT_ROAD_NAME) for the
            # accident data if the data is from the year 2009, when the
            # maximum field length for road names was only 6.
            
            if (accListItem[5] != None and (len(accListItem[5]) == 5 or len(accListItem[5]) == 6)):
                try:
                    accYear = accListItem[9][0:4] # Get the first 4 characters of the Accident_Key
                    accYear = int(accYear) # Turn them into an integer.
                except:
                    accYear = 2000 # If there was a problem, assume the accident was from 2000.
                
                if (accYear <= 2009): # Only fix truncated names for accident years 2009 and earlier.
                    
                    # The next line creates a regex pattern using the current row's AT_ROAD_NAME field
                    # as the pattern, ignoring case.
                    
                    sixthMatchString = re.compile(r'{}'.format(re.escape(accListItem[5])), re.IGNORECASE)
                    #print "This data about", accListItem[5], "is from", int(accListItem.YEAR)
                    roadMatchCounter = 0
                    for roadNamesItem in roadNamesList:
                        # Removes all the spaces from the roadName, allowing
                        # for matching of UNIONC to UNION CHAPEL, LONETR to LONE TREE,
                        # TRICIT to TRI CITY, etc.
                        noSpacesRoadName = str(roadNamesItem[0]).replace(' ', '')
                        atMatch = sixthMatchString.match(noSpacesRoadName)
                        if (atMatch != None):
                            #print "Found a match for", accListItem[5], "and", roadNamesItem[0]
                            roadMatchCounter += 1
                        else:
                            pass
                    # If there was only one match between the accident road name for that county and the
                    # unique road names for that county, replace the accident road name with the
                    # unique road name. -- Does another loop through the roadList to accomplish
                    # this. Probably not the most efficient way to do this, but it works.
                    if roadMatchCounter == 1:
                        #print "Exactly one road matched in this county. Road Matches: ", roadMatchCounter
                        for roadNamesItem in roadNamesList:        
                            # Make sure that the length of the roadNamesItem's name is 6 or greater
                            # and that it is larger than the accListItem.
                            if len(roadNamesItem[0]) > 5 and len(roadNamesItem[0]) > len(accListItem[5]):
                                noSpacesRoadName = str(roadNamesItem[0]).replace(' ', '')
                                atMatch = sixthMatchString.match(noSpacesRoadName)
                                if (atMatch != None):
                                    AddMessage("Old at road name was: " + str(accListItem[5]))
                                    AddMessage("New at road name will be corrected to: " + str(roadNamesItem[0]).upper())
                                    accListItem[5] = str(roadNamesItem[0]).upper()
                                else:
                                    pass
                            else:
                                pass
                            
                    elif roadMatchCounter > 1:
                        #print "More than one road matched in this county. Road Matches: ", roadMatchCounter
                        pass
                    else:
                        pass
                else:
                    pass
            else:
                pass
            
            accListDict[accListItem[0]] = accListItem
        
        print "####################################################"
        print "# End of Sixth String Matching                     #"
        print "####################################################"
        
        
        for accListItem in accListDict.values():
            
            # Remove the extra space in roads with names like "5 TH" and "17 TH".
            
            if (accListItem[2] != None):
                orMatch = seventhMatchString.match(accListItem[2]) # re.compile(r'^[0-9]+\s[t][h]', re.IGNORECASE)
                if (orMatch != None):
                    #print "Need to remove the extra space between the number and the 'TH' in this ON_ROAD_NAME: ", accListItem[2]
                    #print orMatch.end()
                    #print "County Road" + accListItem[2][orMatch.end():]
                    accListItem[2] = orMatch.group(0)[0:orMatch.end()-3] + orMatch.group(0)[orMatch.end()-2:orMatch.end()]
                    print(accListItem[2])
                else:
                    pass
            else:
                pass
            
            if (accListItem[5] != None):
                atMatch = seventhMatchString.match(accListItem[5]) # re.compile(r'^[0-9]+\s[t][h]', re.IGNORECASE)
                if (atMatch != None):
                    #print "Need to remove the extra space between the number and the 'TH' in this AT_ROAD_NAME: ", accListItem[5]
                    #print atMatch.end()
                    #print "County Road" + accListItem[5][atMatch.end():]
                    accListItem[5] = atMatch.group(0)[0:atMatch.end()-3] + atMatch.group(0)[atMatch.end()-2:atMatch.end()]
                    print(accListItem[5])
                else:
                    pass
            else:
                pass
            
            accListDict[accListItem[0]] = accListItem
        
        
        print "####################################################"
        print "# End of Seventh String Matching                   #"
        print "####################################################"
        
        print "####################################################"
        print "# Rebuilding Intersection Names                    #"
        print "####################################################"
        
        for accListItem in accListDict.values():
            
            # Rebuild the intersection names in the form of:
            # onRoadName + " | " + atRoadName
            
            onRoadName = ""
            atRoadName = ""
            
            if accListItem[2] != None:
                onRoadName = str(accListItem[2])
            else:
                pass
            
            if accListItem[5] != None:
                atRoadName = str(accListItem[5])
            else:
                pass
            
            if onRoadName != None and atRoadName != None:
                accListItem[8] = str(onRoadName + " | " + atRoadName)
            else:
                pass
            
            accListDict[accListItem[0]] = accListItem
        
        print "####################################################"
        print "# Intersection Names Rebuilt                       #"
        print "####################################################"
        
        ### Don't forget to add accident_key to the list of sCursor
        ### fields. Need it to properly select which accidents
        ### need their road names un-truncated.
        
        print "####################################################"
        print "# Applying Changes with an Update Cursor           #"
        print "####################################################"
        
        uCursor = UpdateCursor(accidentData, accidentCursorFields)
        
        for uCursorRow in uCursor:
            try:
                accListItem = accListDict[uCursorRow[0]]
                uCursor.updateRow(accListItem)
            except:
                pass        
        try:
            del uCursor
        except:
            pass
        
        print "####################################################"
        print "# Update completed.                                #"
        print "####################################################"
        
        
    except Exception as newException:
        print(str(newException))
        del newException
    
    
    finally:
        try:
            del sCursor
        except:
            pass
        try:
            del uCursor
        except:
            pass
Example #19
def fast_join(fc_target, fc_target_keyfield, fc_join, fc_join_keyfield,
              fields_to_join):

    start_time = perf_counter()

    # make field dict for join fc fields {fname: [dtype, len]}
    jfields = ListFields(fc_join)
    jfields_names = [f.name for f in jfields]
    jfields_dtypes = [f.type for f in jfields]
    jfields_len = [f.length for f in jfields]
    dts_lens = [[dtype, flen] for dtype, flen in zip(jfields_dtypes, jfields_len)]
    jfields_dict = dict(zip(jfields_names, dts_lens))

    # field names in the target fc
    target_start_fields = [f.name for f in ListFields(fc_target)]

    # as needed, add field(s) to target FC if it doesn't already exist.
    print(f"Adding fields {fields_to_join} to target table {fc_target}...")
    for jfield in fields_to_join:
        if jfield not in target_start_fields:
            ftype = jfields_dict[jfield][0]
            flen = jfields_dict[jfield][1]

            management.AddField(in_table=fc_target,
                                field_name=jfield,
                                field_type=ftype,
                                field_length=flen)
        else:
            print(
                f"\t{jfield} already in {fc_target}'s fields. Will be OVERWRITTEN with joined data..."
            )

    # key field names can differ between the join and target feature classes,
    # so build a separate cursor field list for each
    join_cur_fields = [fc_join_keyfield] + fields_to_join
    target_cur_fields = [fc_target_keyfield] + fields_to_join

    join_dict = {}
    print("reading data from join table...")
    with SearchCursor(fc_join, join_cur_fields) as scur:
        for row in scur:
            jkey = row[0]
            vals_to_join = list(row[1:])
            join_dict[jkey] = vals_to_join

    print("writing join data to target table...")
    with UpdateCursor(fc_target, target_cur_fields) as ucur:
        for row in ucur:
            jkey = row[0]

            # if a join id value is in the target table but not the join table,
            # skip the join. The values in the resulting joined column will be
            # null for these cases.
            if join_dict.get(jkey):
                vals_to_join = join_dict[jkey]
            else:
                continue

            ucur.updateRow([jkey] + vals_to_join)

    elapsed_sec = round(perf_counter() - start_time, 1)

    print(f"Successfully joined fields {fields_to_join} from {fc_join} onto {fc_target}" \
        f" in {elapsed_sec} seconds!")
Example #20
def OffsetDirectionMatrix2(offsetOptions):
    """Update the accidentDataWithOffsetOutput geometry with data from geocodedFeatures.
    
    Keyword arguments to be included in the options class:
    gdbLocation -- The gdb where the outputWithOffsetLocations feature class resides.
    accidentDataAtIntersections -- A point feature class containing geocoded accident information.
    accidentDataWithOffsetOutput -- A point feature class with the same structure as the
        geocodedFeatures AND an "isOffset" field of type "TEXT" with length of at least 5.
    whereClauseInUse -- Whether or not the script will use a where clause. Boolean value.
    roadsFeaturesLocation -- The path to the local roads centerline feature class.
    aliasTable -- The path to the roads alias table for the roads centerline feature class.
    maxDegreesDifference -- The number of degrees that a potentially matching accident
        offset location can be from the direction specified. If this is set to -1, the check
        will be skipped and no matching accident offset locations will be rejected, even if
        they are in the opposite direction from where the accident record says they should
        be. I.e. the accident could be offset to the North when the accident record says that
        it should be South of the intersection when this check is skipped.
    XYFieldList -- The list of fields to use from the copy of the geocoded accidents feature
        class after that copy has had POINT_X and POINT_Y fields added and calculated.
    """
    
    ###########################################################################
    ## Function overview:
    ## For each row in the feature class of accidents that have been geolocated
    ## to an intersection:
    ###########################################################################
    # Make sure that the Status for the point is not 'U' -- Unlocated.
    # Might take care of test for 'U' points before getting to this
    # step in the process, but if not, be sure to test for it here.
    # Create/calculate intersection X & Y field named POINT_X and POINT_Y.
    # Then, calculate those fields.
    # Then, create a buffer.
    # Then, select the On_Road in the roads layer.
    # Then, intersect the buffer with the roads layer to create an offset
    # points layer.
    # Then, split the offset points from potential multipart points to
    # singlepart points.
    ###########################################################################
    # Then, use the "SHAPE@XY" token to access the X & Y of the individual
    # offset points and compare them to the X & Y values in the POINT_X and
    # POINT_Y fields, which hold the values for the related roads' intersection
    # that the accidents were geolocated to.
    # Then, test the intersected points to find the best one for the given
    # direction.
    ###########################################################################
    # Then, append the information for that point into a list.
    # Then, delete the buffer and intersection layer.
    # Repeat for each other row...
    ###########################################################################
    # When all the rows are finished,
    # Append the attribute information for the
    # related accident into each offset point's row.
    # Lastly, write the data for all the offset point rows
    # into the output layer.
    ###########################################################################
    # Maximum angle difference code confirmed to be working. -- 2015-03-18
    # 771/771 manually checked look good (for the information given) using
    # UpdateKdotNameInCenterline(), Where Clause for selection, and
    # Maximum Angle Difference.
    # Locates 771/862 non-'U' points without the modified versions of 
    # ON_ROAD_NAME/AT_ROAD/AT_ROAD_DIRECTION/AT_ROAD_DIST_FEET labeled fields
    # and 803/862 with them. 
    ###########################################################################
    
    AddMessage("The value of the useKDOTFields option is: " + str(offsetOptions.useKDOTFields))
    
    roadsToIntersect = offsetOptions.roadsFeaturesLocation
    roadsAliasTable = offsetOptions.aliasTable
    geocodedFeatures = offsetOptions.accidentDataAtIntersections
    outputWithOffsetLocations = offsetOptions.accidentDataWithOffsetOutput
    whereClauseFlag = offsetOptions.whereClauseInUse
    maximumDegreesDifference = offsetOptions.maxDegreesDifference
    KDOTFieldUse = offsetOptions.useKDOTFields
    
    AddMessage("The value for KDOTFieldUse is: " + str(KDOTFieldUse))
    
    if str(KDOTFieldUse).lower() == 'false':
        featuresWithXYFieldList = offsetOptions.NonKDOTXYFieldList
        AddMessage("Using nonKDOTXYFieldList.")
    else:
        featuresWithXYFieldList = offsetOptions.KDOTXYFieldList
    
    geodatabaseLocation = getGDBLocationFromFC(outputWithOffsetLocations)
    
    env.workspace = geodatabaseLocation
    env.overwriteOutput = True
    geocodedWhereClause = "STATUS <> 'U'"
    featuresWithXY = 'geocodedWithXY'
    geocodedLocXY = r'in_memory\geocodedFeatures_Loc_XY' # Changed this to an in_memory location also.
    
    # Scratch data locations
    intermediateAccidentBuffer = r'in_memory\intermediateAccidentBuffer'
    intermediateAccidentIntersect = r'in_memory\intermediateAccidentIntersect'
    intermediateAccidentIntersectSinglePart = r'in_memory\intermediateAccidentIntersectSinglePart'
    # Added 2016-09-06 after the Wichita Area points started processing. Slowly.
    intermediateRoadsToIntersect = r'in_memory\roadsToIntersect'
    intermediateRoadsAliasTable = r'in_memory\roadsAliasTable'
    
    descSpatialReference = Describe(geocodedFeatures).spatialReference
    
    # Make a feature layer of geocodedFeatures using a where clause to restrict to those points
    # which have been located to an intersection, then add XY to it.
    MakeFeatureLayer_management(geocodedFeatures, featuresWithXY, geocodedWhereClause)
    CopyFeatures_management(featuresWithXY, geocodedLocXY)
    AddXY_management(geocodedLocXY)
    
    roadsAsFeatureLayer = 'ConflatedRoadsFeatureLayer'
    
    # Roads copied to memory.
    CopyFeatures_management(roadsToIntersect, intermediateRoadsToIntersect)
    
    MakeFeatureLayer_management(intermediateRoadsToIntersect, roadsAsFeatureLayer)
    
    # Use Point_X & Point_Y for the geolocated intersection location.
    # Use shape tokens for the x & y of the points which
    # result from intersecting the buffer & road geometries.
    
    geocodedAccidentsList = list()
    singlePartOffsetAccidentsList = list()
    
    print "The path of the geocodedFeatures used is: " + geocodedFeatures
    
    #AddMessage("The field names used in the search cursor are:")
    #for fieldListItem in featuresWithXYFieldList:
    #    AddMessage(fieldListItem)
    
    accidentsCursor = SearchCursor(geocodedLocXY, featuresWithXYFieldList)
    
    for accidentRow in accidentsCursor:
        geocodedAccidentsList.append(accidentRow)
    
    try:
        del accidentsCursor
    except:
        pass
    
    print 'whereClauseFlag is: ' + str(whereClauseFlag)
    print 'Starting the offset process...'
    
    accCounter = -1
    
    env.outputCoordinateSystem = descSpatialReference
    
    if whereClauseFlag == True:
        
        # Don't need to create a relate or a join.
        # Just need to do a select on the would-be joined/related table
        # to get the SEGIDs, then use those to do a select
        # for the GCIDs in the conflation roads.
        
        # Try using table to table here instead of copy features.
        # For some reason, arcpy doesn't like this table when it's in the
        # ar63 FGDBs.
        ####TableToTable_conversion(roadsAliasTable, 'in_memory', 'roadsAliasTable') # == intermediateRoadsAliasTable
        #CopyFeatures_management(roadsAliasTable, intermediateRoadsAliasTable)
        
        ####roadsAliasTableView = MakeTableView_management(intermediateRoadsAliasTable, 'roadsAliasTableView')
        
        fieldNamesList = ["RD", "LABEL"]
        
        aliasFieldNamesToAdd = ExtractAliasFields(roadsAsFeatureLayer, aliasFieldBaseName)
        
        if aliasFieldNamesToAdd is not None:
            fieldNamesList = fieldNamesList + aliasFieldNamesToAdd
        else:
            pass
        
        for geocodedAccident in geocodedAccidentsList:
            accCounter += 1
            print 'Working on geocodedAccident #' + str(accCounter)
            # Create a point here with the x & y from the geocodedAccident,
            # add the coordinate system, OBJECTID, and AccidentID
            # from the geocodedAccident layer.
            # Then, create a buffer with it.
            
            #if geocodedAccident[2] is not None and geocodedAccident[3] is not None:
            tempPoint = Point(geocodedAccident[2], geocodedAccident[3])
            #print "\t " + str(tempPoint.X) + ", " + str(tempPoint.Y)
            tempPointGeometry = PointGeometry(tempPoint, descSpatialReference)
            accidentDistanceOffset = geocodedAccident[7]
            accidentClusterTolerance = 1
            
            # this part needs to dynamically figure out how many alias fields there are
            # and what their names are, based on supplying it with the base name for the
            # alias field, which in this case is 'Alias_Name_' -- then, it should look
            # at all of the fields in flattenedDataTable and determine which ones
            # to use as alias fields.
            # In the current set, there are 6 alias fields.
            # So, it should generate a SQL clause to check each roadName for
            # the given 'RD' and 'Label' fields as well as the 6 alias fields.
            # If the crash point's roadname is in any of those, the road should be
            # selected.
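            # (Hedged sketches of helpers along these lines appear after this function.)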
            try:
                #####################
                # Offsetting while using a WhereClause follows:
                #####################
                if accidentDistanceOffset is not None: # In Python it's None, whereas in an ArcGIS table it's <null>
                    
                    accidentDistanceOffset = int(accidentDistanceOffset)
                    
                    if accidentDistanceOffset != 0:
                        
                        Buffer_analysis(tempPointGeometry, intermediateAccidentBuffer, accidentDistanceOffset)
                        
                        # Check the raw value for None before converting to a string;
                        # str(None) would yield 'None' and defeat the checks below.
                        firstRoadName = geocodedAccident[5]
                        if firstRoadName is not None:
                            firstRoadName = str(firstRoadName).upper()
                        else:
                            firstRoadName = 'NotAValidRoad'
                        
                        secondRoadName = geocodedAccident[8]
                        if secondRoadName is not None:
                            secondRoadName = str(secondRoadName).upper()
                        else:
                            secondRoadName = 'NotAValidRoad'
                        
                        thirdRoadName = ParseMatchAddr(geocodedAccident[9])
                        if thirdRoadName is not None:
                            thirdRoadName = str(thirdRoadName).upper()
                        else:
                            thirdRoadName = 'NotAValidRoad'
                        
                        roadNamesList = [firstRoadName, secondRoadName, thirdRoadName]
                        
                        streetWhereClause = GenerateRoadSelectQuery(roadNamesList, fieldNamesList)
                        
                        #aliasIDsList = getAliasIDs(roadNamesList, roadsAliasTableView)
                        #aliasIDsLength = len(aliasIDsList)
                        '''
                        if aliasIDsLength != 0:
                            aliasIDsString = """("""
                            for x in xrange(aliasIDsLength):
                                if (x != (aliasIDsLength - 1)):
                                    aliasIDsString += """'""" + aliasIDsList[x] + """',"""
                                else:
                                    aliasIDsString += """'""" + aliasIDsList[x] + """')"""
                            
                            streetWhereClause = (""" "RD" = '""" + firstRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + firstRoadName + """'""" + """ OR """ +
                                                """ "RD" = '""" + secondRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + secondRoadName + """'""" + """ OR """ +
                                                """ "RD" = '""" + thirdRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + thirdRoadName + """'""" +
                                                """ OR GCID IN """ + aliasIDsString)
                        else:
                            #Without the aliasIDs.
                            streetWhereClause = (""" "RD" = '""" + firstRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + firstRoadName + """'""" + """ OR """ +
                                                """ "RD" = '""" + secondRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + secondRoadName + """'""" + """ OR """ +
                                                """ "RD" = '""" + thirdRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + thirdRoadName + """'""")
                        '''
                        
                        SelectLayerByAttribute_management(roadsAsFeatureLayer, "NEW_SELECTION", streetWhereClause)
                        
                        selectionCount = int(GetCount_management(roadsAsFeatureLayer).getOutput(0))
                        
                        if selectionCount != 0:
                            featuresToIntersect = [roadsAsFeatureLayer, intermediateAccidentBuffer]
                            Intersect_analysis(featuresToIntersect, intermediateAccidentIntersect, "ALL", "", "POINT")
                            
                            if int(GetCount_management(intermediateAccidentIntersect).getOutput(0)) > 0:
                                MultipartToSinglepart_management(intermediateAccidentIntersect, intermediateAccidentIntersectSinglePart)
                                
                                singlePartsCursor = SearchCursor(intermediateAccidentIntersectSinglePart, ['SHAPE@XY'])
                                for singlePart in singlePartsCursor:
                                    singlePartListItem = [singlePart[0], geocodedAccident[2], geocodedAccident[3], geocodedAccident[4],
                                                               geocodedAccident[6], geocodedAccident[0]]
                                    singlePartOffsetAccidentsList.append(singlePartListItem)
                                
                                try:
                                    del singlePartsCursor
                                except:
                                    pass
                            else:
                                pass
                            try:
                                Delete_management(intermediateAccidentIntersect)
                            except:
                                pass
                        else:
                            pass
                            #print 'Zero road segments selected. Will not attempt to offset.'
                    else:
                        pass
                        #print 'AT_ROAD_DIST_FEET is 0. Will not attempt to offset.'
                else:
                    pass
                    #print 'AT_ROAD_DIST_FEET is null. Will not attempt to offset.'
                
            except:
                # Need to log the warnings with details so that I know what's wrong with them.
                print "WARNING:"
                print "An error occurred which prevented the accident point with Acc_Key: " + str(geocodedAccident[4])
                print "from being buffered and/or offset properly."
                errorItem = sys.exc_info()[1]
                errorStatement = str(errorItem.args[0])
                print errorStatement
                
                try:
                    del errorItem
                except:
                    pass
    
    
    elif whereClauseFlag == False:
        for geocodedAccident in geocodedAccidentsList:
            
            # Create a point here with the x & y from the geocodedAccident,
            # add the coordinate system, OBJECTID, and AccidentID
            # from the geocodedAccident layer.
            # Then, create a buffer with it.
            
            #if geocodedAccident[2] is not None and geocodedAccident[3] is not None:
            tempPoint = Point(geocodedAccident[2], geocodedAccident[3])
            #print "\t " + str(tempPoint.X) + ", " + str(tempPoint.Y)
            tempPointGeometry = PointGeometry(tempPoint, descSpatialReference)
            accidentDistanceOffset = geocodedAccident[7]
            ##accidentClusterTolerance = 2
            
            try:
                #####################
                # Offsetting while not using a WhereClause follows:
                #####################
                
                if accidentDistanceOffset is not None:
                    if int(accidentDistanceOffset) != 0:
                        accidentDistanceOffset = int(accidentDistanceOffset)
                        
                        Buffer_analysis(tempPointGeometry, intermediateAccidentBuffer, accidentDistanceOffset)
                        
                        featuresToIntersect = [roadsAsFeatureLayer, intermediateAccidentBuffer]
                        Intersect_analysis(featuresToIntersect, intermediateAccidentIntersect, "ALL", "", "POINT")
                        if int(GetCount_management(intermediateAccidentIntersect).getOutput(0)) > 0:
                            MultipartToSinglepart_management(intermediateAccidentIntersect, intermediateAccidentIntersectSinglePart)
                            
                            singlePartsCursor = SearchCursor(intermediateAccidentIntersectSinglePart, ['SHAPE@XY'])
                            for singlePart in singlePartsCursor:
                                singlePartListItem = [singlePart[0], geocodedAccident[2], geocodedAccident[3], geocodedAccident[4],
                                                           geocodedAccident[6], geocodedAccident[0]]
                                singlePartOffsetAccidentsList.append(singlePartListItem)
                            
                            try:
                                del singlePartsCursor
                            except:
                                pass
                            try:
                                # Delete the in_memory intersect output rather than the local
                                # variable name, so the next iteration still has the path.
                                Delete_management(intermediateAccidentIntersect)
                            except:
                                pass
                        else:
                            pass
                    else:
                        pass
                        # Need to change this to being offset to the intersection, i.e. no movement, but
                        # considered to be correctly offset all the same.
                        #print 'AT_ROAD_DIST_FEET is 0. Will not attempt to offset.' 
                else:
                    pass
                    #print 'AT_ROAD_DIST_FEET is null. Will not attempt to offset.'
            except:
                print "WARNING:"
                print "An error occurred which prevented the accident point with Acc_Key: " + str(geocodedAccident[4])
                print "from being buffered and/or offset properly."
                errorItem = sys.exc_info()[1]
                errorStatement = str(errorItem.args[0])
                print errorStatement
                
                try:
                    del errorItem
                except:
                    pass
    
    
    else:
        print 'Please set the whereClauseFlag to either (boolean) True or False.'
        #pass
    
    offsetDictionaryByAccidentKey = dict()
    listContainer = list()
    
    # Group the rows by accident_key for further analysis,
    # and add them to the dictionary/list/list data structure.
    
    for singlePartOffsetItem in singlePartOffsetAccidentsList:
        if singlePartOffsetItem[3] in offsetDictionaryByAccidentKey.keys():
            listContainer = offsetDictionaryByAccidentKey[singlePartOffsetItem[3]]
            listContainer.append(singlePartOffsetItem)
            offsetDictionaryByAccidentKey[singlePartOffsetItem[3]] = listContainer
        else:
            listContainer = list()
            listContainer.append(singlePartOffsetItem)
            offsetDictionaryByAccidentKey[singlePartOffsetItem[3]] = listContainer
    
    updateListValues = list()
    
    for accidentKey in offsetDictionaryByAccidentKey.keys():
        # accidentKey will be a unique accident key from the table
        listContainer = offsetDictionaryByAccidentKey[accidentKey]
        updateList = [-1, -1, -1, "False"]
        try:
            # Get the AT_ROAD_KDOT_DIRECTION/AT_ROAD_DIRECTION from the first (0th) entry.
            directionToTest = listContainer[0][4] 
            if directionToTest is not None:
                directionToTest = str(directionToTest).upper()
                updateList = findTheMostInterestingRow(listContainer, directionToTest, maximumDegreesDifference)
                if updateList[0] != -1:
                    updateListValues.append(updateList)
                else:
                    # -1 is not a valid Acc_Key. Slight optimization for the next for loop that uses this list so that
                    # it doesn't have to be re-checked each time through the list for each accident in the table.
                    pass
            else:
                print 'Direction to test is null.'
        except:
            pass
    
    accidentUpdateCursorFields = ['ACCIDENT_KEY', 'Shape@XY', 'isOffset']
    
    accidentUpdateCursor = UpdateCursor(outputWithOffsetLocations, accidentUpdateCursorFields)
    for cursorItem in accidentUpdateCursor:
        for updateListItem in updateListValues:
            if cursorItem[0] == updateListItem[0]:
                if str(cursorItem[2]).upper() == 'TRUE':    # Don't make any changes if true.
                    AddMessage('The accident point with Acc_Key: ' + str(cursorItem[0]) + ' is already offset.')
                else:                                       # Otherwise, offset the point.
                    editableCursorItem = list(cursorItem)
                    #AddMessage('Found a matching cursorItem with an Accident_Key of ' + str(cursorItem[0]) + ".")
                    editableCursorItem[1] = (updateListItem[1], updateListItem[2])
                    editableCursorItem[2] = updateListItem[3]
                    #AddMessage(str(editableCursorItem))
                    accidentUpdateCursor.updateRow(editableCursorItem)
                    
            else:
                pass
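
# Hedged sketches only: ExtractAliasFields and GenerateRoadSelectQuery are called in the
# function above but their implementations are not included in this listing. The sketches
# below illustrate the idea described in the comment block inside the function -- find the
# alias fields by a shared base name (for example 'Alias_Name_') and build one OR'd where
# clause that checks every road name against every name field. Details are assumptions.
from arcpy import ListFields

def ExtractAliasFieldsSketch(featureLayer, aliasFieldBaseName):
    """Sketch of ExtractAliasFields: list the fields whose names start with the base name."""
    aliasFields = [field.name for field in ListFields(featureLayer)
                   if field.name.startswith(aliasFieldBaseName)]
    return aliasFields if aliasFields else None

def GenerateRoadSelectQuerySketch(roadNamesList, fieldNamesList):
    """Sketch of GenerateRoadSelectQuery: match any road name in any of the given fields."""
    clauses = []
    for fieldName in fieldNamesList:
        for roadName in roadNamesList:
            clauses.append("\"" + fieldName + "\" = '" + roadName + "'")
    return " OR ".join(clauses)
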
Ejemplo n.º 21
0
#-----TASK 3 - CREATE A NEW FIELD IN GEOGRAPHIC DATA AND UPDATE ITS VALUES----

from arcpy import env, AddField_management
from arcpy.da import UpdateCursor

# Setting the workspace
env.workspace = "C:\\demographicMaps\\setores"

# Setting the geographic data
geographicData = "sampa"
geographicDataTable = "sampa.dbf"

# The name of new field
newField = "white"

# Create the new field in table of geographic data
AddField_management(geographicDataTable, newField, "TEXT", field_length=100)

with UpdateCursor(geographicData + ".shp", [newField]) as geographicRows:

    for row in geographicRows:

        # Update the value for each row in newField
        row[0] = "white people"
        geographicRows.updateRow(row)
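
# A hedged alternative sketch (not part of the original example): the same constant value
# could be written without a cursor by using CalculateField_management. The expression
# type depends on the ArcGIS version ("PYTHON_9.3" on Desktop, "PYTHON3" on Pro).
from arcpy import CalculateField_management

CalculateField_management(geographicDataTable, newField, "'white people'", "PYTHON_9.3")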
Ejemplo n.º 22
0
def OffsetDirectionMatrix2(offsetOptions):
    """Update the accidentDataWithOffsetOutput geometry with data from geocodedFeatures.
    
    Keyword arguments to be included in the options class:
    gdbLocation -- The gdb where the outputWithOffsetLocations feature class resides.
    accidentDataAtIntersections -- A point feature class containing geocoded accident information.
    accidentDataWithOffsetOutput -- A point feature class with the same structure as the
        geocodedFeatures AND an "isOffset" field of type "TEXT" with a length of at least 5.
    whereClauseInUse -- Whether or not the script will use a where clause. Boolean value.
    roadsFeaturesLocation -- The path to the local roads centerline feature class.
    aliasTable -- The path to the roads alias table for the roads centerline feature class.
    maxDegreesDifference -- The number of degrees that a potentially matching accident
        offset location can be from the direction specified. If this is set to -1, the check
        will be skipped and no matching accident offset locations will be rejected, even if
        they are in the opposite direction from where the accident record says they should
        be. I.e., when this check is skipped, the accident could be offset to the North even
        though the accident record says it should be South of the intersection.
    XYFieldList -- The list of fields to use from the copy of the geocoded accidents feature
        class after that copy has had POINT_X and POINT_Y fields added and calculated.
    """
    
    ###########################################################################
    ## Function overview:
    ## For each row in the feature class of accidents that have been geolocated
    ## to an intersection:
    ###########################################################################
    # Make sure that the Status for the point is not 'U' -- Unlocated.
    # The test for 'U' points might already be handled before getting to this
    # step in the process, but if not, be sure to test for it here.
    # Create/calculate intersection X & Y field named POINT_X and POINT_Y.
    # Then, calculate those fields.
    # Then, create a buffer.
    # Then, select the On_Road in the roads layer.
    # Then, intersect the buffer with the roads layer to create an offset
    # points layer.
    # Then, split the offset points from potential multipart points to
    # singlepart points.
    ###########################################################################
    # Then, use the "SHAPE@XY" token to access the X & Y of the individual
    # offset points and compare them to the X & Y values in the POINT_X and
    # POINT_Y fields, which hold the values for the related roads' intersection
    # that the accidents were geolocated to.
    # Then, test the intersected points to find the best one for the given
    # direction.
    ###########################################################################
    # Then, append the information for that point into a list.
    # Then, delete the buffer and intersection layer.
    # Repeat for each other row...
    ###########################################################################
    # When all the rows are finished,
    # Append the attribute information for the
    # related accident into each offset point's row.
    # Lastly, write the data for all the offset point rows
    # into the output layer.
    ###########################################################################
    # Maximum angle difference code confirmed to be working. -- 2015-03-18
    # 771/771 manually checked look good (for the information given) using
    # UpdateKdotNameInCenterline(), Where Clause for selection, and
    # Maximum Angle Difference.
    # Locates 771/862 non-'U' points without the modified versions of 
    # ON_ROAD_NAME/AT_ROAD/AT_ROAD_DIRECTION/AT_ROAD_DIST_FEET labeled fields
    # and 803/862 with them. 
    ###########################################################################
    
    AddMessage("The value of the useKDOTFields option is: " + str(offsetOptions.useKDOTFields))
    
    roadsToIntersect = offsetOptions.roadsFeaturesLocation
    roadsAliasTable = offsetOptions.aliasTable
    geocodedFeatures = offsetOptions.accidentDataAtIntersections
    outputWithOffsetLocations = offsetOptions.accidentDataWithOffsetOutput
    whereClauseFlag = offsetOptions.whereClauseInUse
    maximumDegreesDifference = offsetOptions.maxDegreesDifference
    KDOTFieldUse = offsetOptions.useKDOTFields
    
    AddMessage("The value for KDOTFieldUse is: " + str(KDOTFieldUse))
    
    if str(KDOTFieldUse).lower() == 'false':
        featuresWithXYFieldList = offsetOptions.NonKDOTXYFieldList
        AddMessage("Using nonKDOTXYFieldList.")
    else:
        featuresWithXYFieldList = offsetOptions.KDOTXYFieldList
    
    geodatabaseLocation = getGDBLocationFromFC(outputWithOffsetLocations)
    
    env.workspace = geodatabaseLocation
    env.overwriteOutput = True
    geocodedWhereClause = "STATUS <> 'U'"
    featuresWithXY = 'geocodedWithXY'
    geocodedLocXY = r'in_memory\geocodedFeatures_Loc_XY' # Changed this to an in_memory location also.
    
    # Scratch data locations
    intermediateAccidentBuffer = r'in_memory\intermediateAccidentBuffer'
    intermediateAccidentIntersect = r'in_memory\intermediateAccidentIntersect'
    intermediateAccidentIntersectSinglePart = r'in_memory\intermediateAccidentIntersectSinglePart'
    # Added 2016-09-06 after the Wichita Area points started processing. Slowly.
    intermediateRoadsToIntersect = r'in_memory\roadsToIntersect'
    intermediateRoadsAliasTable = r'in_memory\roadsAliasTable'
    
    descSpatialReference = Describe(geocodedFeatures).spatialReference
    
    # Make a feature layer of geocodedFeatures using a where clause to restrict to those points
    # which have been located to an intersection, then add XY to it.
    MakeFeatureLayer_management(geocodedFeatures, featuresWithXY, geocodedWhereClause)
    CopyFeatures_management(featuresWithXY, geocodedLocXY)
    AddXY_management(geocodedLocXY)
    
    roadsAsFeatureLayer = 'ConflatedRoadsFeatureLayer'
    
    # Roads copied to memory.
    CopyFeatures_management(roadsToIntersect, intermediateRoadsToIntersect)
    
    MakeFeatureLayer_management(intermediateRoadsToIntersect, roadsAsFeatureLayer)
    
    # Use Point_X & Point_Y for the geolocated intersection location.
    # Use shape tokens for the x & y of the points which
    # result from intersecting the buffer & road geometries.    
    
    geocodedAccidentsList = list()
    singlePartOffsetAccidentsList = list()
    
    print "The path of the geocodedFeatures used is: " + geocodedFeatures
    
    #AddMessage("The field names used in the search cursor are:")
    #for fieldListItem in featuresWithXYFieldList:
    #    AddMessage(fieldListItem)
    
    accidentsCursor = SearchCursor(geocodedLocXY, featuresWithXYFieldList)
    
    for accidentRow in accidentsCursor:
        geocodedAccidentsList.append(accidentRow)
    
    try:
        del accidentsCursor
    except:
        pass
    
    print 'whereClauseFlag is: ' + str(whereClauseFlag)
    print 'Starting the offset process...'
    
    accCounter = -1
    
    env.outputCoordinateSystem = descSpatialReference
    
    if whereClauseFlag == True:
        
        # Don't need to create a relate or a join.
        # Just need to do a select on the would-be joined/related table
        # to get the SEGIDs, then use those to do a select
        # for the GCIDs in the conflation roads.
        
        # Try using table to table here instead of copy features.
        # For some reason, arcpy doesn't like this table when it's in the
        # ar63 FGDBs.
        TableToTable_conversion(roadsAliasTable, 'in_memory', 'roadsAliasTable') # == intermediateRoadsAliasTable
        #CopyFeatures_management(roadsAliasTable, intermediateRoadsAliasTable)
        
        roadsAliasTableView = MakeTableView_management(intermediateRoadsAliasTable, 'roadsAliasTableView')
        
        for geocodedAccident in geocodedAccidentsList:
            accCounter += 1
            print 'Working on geocodedAccident #' + str(accCounter)
            # Create a point here with the x & y from the geocodedAccident,
            # add the coordinate system, OBJECTID, and AccidentID
            # from the geocodedAccident layer.
            # Then, create a buffer with it.
            
            #if geocodedAccident[2] is not None and geocodedAccident[3] is not None:
            tempPoint = Point(geocodedAccident[2], geocodedAccident[3])
            #print "\t " + str(tempPoint.X) + ", " + str(tempPoint.Y)
            tempPointGeometry = PointGeometry(tempPoint, descSpatialReference)
            accidentDistanceOffset = geocodedAccident[7]
            accidentClusterTolerance = 1
            
            
            try:
                #####################
                # Offsetting while using a WhereClause follows:
                #####################
                if accidentDistanceOffset is not None: # In Python it's None, whereas in an ArcGIS table it's <null>
                    
                    accidentDistanceOffset = int(accidentDistanceOffset)
                    
                    if accidentDistanceOffset != 0:
                        
                        Buffer_analysis(tempPointGeometry, intermediateAccidentBuffer, accidentDistanceOffset)
                        
                        # Check the raw value for None before converting to a string;
                        # str(None) would yield 'None' and defeat the checks below.
                        firstRoadName = geocodedAccident[5]
                        if firstRoadName is not None:
                            firstRoadName = str(firstRoadName).upper()
                        else:
                            firstRoadName = 'NotAValidRoad'
                        
                        secondRoadName = geocodedAccident[8]
                        if secondRoadName is not None:
                            secondRoadName = str(secondRoadName).upper()
                        else:
                            secondRoadName = 'NotAValidRoad'
                        
                        thirdRoadName = ParseMatchAddr(geocodedAccident[9])
                        if thirdRoadName is not None:
                            thirdRoadName = str(thirdRoadName).upper()
                        else:
                            thirdRoadName = 'NotAValidRoad'
                        
                        roadNamesList = [firstRoadName, secondRoadName, thirdRoadName]
                        
                        aliasIDsList = getAliasIDs(roadNamesList, roadsAliasTableView)
                        
                        aliasIDsLength = len(aliasIDsList)
                        
                        if aliasIDsLength != 0:
                            aliasIDsString = """("""
                            for x in xrange(aliasIDsLength):
                                if (x != (aliasIDsLength - 1)):
                                    aliasIDsString += """'""" + aliasIDsList[x] + """',"""
                                else:
                                    aliasIDsString += """'""" + aliasIDsList[x] + """')"""
                            
                            streetWhereClause = (""" "RD" = '""" + firstRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + firstRoadName + """'""" + """ OR """ +
                                                """ "RD" = '""" + secondRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + secondRoadName + """'""" + """ OR """ +
                                                """ "RD" = '""" + thirdRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + thirdRoadName + """'""" +
                                                """ OR GCID IN """ + aliasIDsString)
                        else:
                            #Without the aliasIDs.
                            streetWhereClause = (""" "RD" = '""" + firstRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + firstRoadName + """'""" + """ OR """ +
                                                """ "RD" = '""" + secondRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + secondRoadName + """'""" + """ OR """ +
                                                """ "RD" = '""" + thirdRoadName  + """'""" + """ OR """ +
                                                """ "LABEL" = '""" + thirdRoadName + """'""")
                        
                        
                        SelectLayerByAttribute_management(roadsAsFeatureLayer, "NEW_SELECTION", streetWhereClause)
                        
                        selectionCount = int(GetCount_management(roadsAsFeatureLayer).getOutput(0))
                        
                        if selectionCount != 0:
                            featuresToIntersect = [roadsAsFeatureLayer, intermediateAccidentBuffer]
                            Intersect_analysis(featuresToIntersect, intermediateAccidentIntersect, "ALL", "", "POINT")
                            
                            if int(GetCount_management(intermediateAccidentIntersect).getOutput(0)) > 0:
                                MultipartToSinglepart_management(intermediateAccidentIntersect, intermediateAccidentIntersectSinglePart)
                                
                                singlePartsCursor = SearchCursor(intermediateAccidentIntersectSinglePart, ['SHAPE@XY'])
                                for singlePart in singlePartsCursor:
                                    singlePartListItem = [singlePart[0], geocodedAccident[2], geocodedAccident[3], geocodedAccident[4],
                                                               geocodedAccident[6], geocodedAccident[0]]
                                    singlePartOffsetAccidentsList.append(singlePartListItem)
                                
                                try:
                                    del singlePartsCursor
                                except:
                                    pass
                            else:
                                pass
                            try:
                                # Delete the in_memory intersect output rather than the local
                                # variable name, so the next iteration still has the path.
                                Delete_management(intermediateAccidentIntersect)
                            except:
                                pass
                        else:
                            pass
                            #print 'Zero road segments selected. Will not attempt to offset.'
                    else:
                        pass
                        #print 'AT_ROAD_DIST_FEET is 0. Will not attempt to offset.'
                else:
                    pass
                    #print 'AT_ROAD_DIST_FEET is null. Will not attempt to offset.'
                
            except:
                # Need to log the warnings with details so that I know what's wrong with them.
                print "WARNING:"
                print "An error occurred which prevented the accident point with Acc_Key: " + str(geocodedAccident[4])
                print "from being buffered and/or offset properly."
                errorItem = sys.exc_info()[1]
                errorStatement = str(errorItem.args[0])
                print errorStatement
                
                try:
                    del errorItem
                except:
                    pass
    
    
    elif whereClauseFlag == False:
        for geocodedAccident in geocodedAccidentsList:
            
            # Create a point here with the x & y from the geocodedAccident,
            # add the coordinate system, OBJECTID, and AccidentID
            # from the geocodedAccident layer.
            # Then, create a buffer with it.
            
            #if geocodedAccident[2] is not None and geocodedAccident[3] is not None:
            tempPoint = Point(geocodedAccident[2], geocodedAccident[3])
            #print "\t " + str(tempPoint.X) + ", " + str(tempPoint.Y)
            tempPointGeometry = PointGeometry(tempPoint, descSpatialReference)
            accidentDistanceOffset = geocodedAccident[7]
            ##accidentClusterTolerance = 2
            
            try:
                #####################
                # Offsetting while not using a WhereClause follows:
                #####################
                
                if accidentDistanceOffset is not None:
                    if int(accidentDistanceOffset) != 0:
                        accidentDistanceOffset = int(accidentDistanceOffset)
                        
                        Buffer_analysis(tempPointGeometry, intermediateAccidentBuffer, accidentDistanceOffset)
                        
                        featuresToIntersect = [roadsAsFeatureLayer, intermediateAccidentBuffer]
                        Intersect_analysis(featuresToIntersect, intermediateAccidentIntersect, "ALL", "", "POINT")
                        if int(GetCount_management(intermediateAccidentIntersect).getOutput(0)) > 0:
                            MultipartToSinglepart_management(intermediateAccidentIntersect, intermediateAccidentIntersectSinglePart)
                            
                            singlePartsCursor = SearchCursor(intermediateAccidentIntersectSinglePart, ['SHAPE@XY'])
                            for singlePart in singlePartsCursor:
                                singlePartListItem = [singlePart[0], geocodedAccident[2], geocodedAccident[3], geocodedAccident[4],
                                                           geocodedAccident[6], geocodedAccident[0]]
                                singlePartOffsetAccidentsList.append(singlePartListItem)
                            
                            try:
                                del singlePartsCursor
                            except:
                                pass
                            try:
                                # Delete the in_memory intersect output rather than the local
                                # variable name, so the next iteration still has the path.
                                Delete_management(intermediateAccidentIntersect)
                            except:
                                pass
                        else:
                            pass
                    else:
                        pass
                        # Need to change this to being offset to the intersection, i.e. no movement, but
                        # considered to be correctly offset all the same.
                        #print 'AT_ROAD_DIST_FEET is 0. Will not attempt to offset.' 
                else:
                    pass
                    #print 'AT_ROAD_DIST_FEET is null. Will not attempt to offset.'
            except:
                print "WARNING:"
                print "An error occurred which prevented the accident point with Acc_Key: " + str(geocodedAccident[4])
                print "from being buffered and/or offset properly."
                errorItem = sys.exc_info()[1]
                errorStatement = str(errorItem.args[0])
                print errorStatement
                
                try:
                    del errorItem
                except:
                    pass
    
    
    else:
        print 'Please set the whereClauseFlag to either (boolean) True or False.'
        #pass
    
    offsetDictionaryByAccidentKey = dict()
    listContainer = list()
    
    # Group the rows by accident_key for further analysis,
    # and add them to the dictionary/list/list data structure.
    
    for singlePartOffsetItem in singlePartOffsetAccidentsList:
        if singlePartOffsetItem[3] in offsetDictionaryByAccidentKey.keys():
            listContainer = offsetDictionaryByAccidentKey[singlePartOffsetItem[3]]
            listContainer.append(singlePartOffsetItem)
            offsetDictionaryByAccidentKey[singlePartOffsetItem[3]] = listContainer
        else:
            listContainer = list()
            listContainer.append(singlePartOffsetItem)
            offsetDictionaryByAccidentKey[singlePartOffsetItem[3]] = listContainer
    
    updateListValues = list()
    
    for accidentKey in offsetDictionaryByAccidentKey.keys():
        # accidentKey will be a unique accident key from the table
        listContainer = offsetDictionaryByAccidentKey[accidentKey]
        updateList = [-1, -1, -1, "False"]
        try:
            # Get the AT_ROAD_KDOT_DIRECTION/AT_ROAD_DIRECTION from the first (0th) entry.
            directionToTest = listContainer[0][4] 
            if directionToTest is not None:
                directionToTest = str(directionToTest).upper()
                updateList = findTheMostInterestingRow(listContainer, directionToTest, maximumDegreesDifference)
                if updateList[0] != -1:
                    updateListValues.append(updateList)
                else:
                    # -1 is not a valid Acc_Key. Slight optimization for the next for loop that uses this list so that
                    # it doesn't have to be re-checked each time through the list for each accident in the table.
                    pass
            else:
                print 'Direction to test is null.'
        except:
            pass
    
    accidentUpdateCursorFields = ['ACCIDENT_KEY', 'Shape@XY', 'isOffset']
    
    accidentUpdateCursor = UpdateCursor(outputWithOffsetLocations, accidentUpdateCursorFields)
    for cursorItem in accidentUpdateCursor:
        for updateListItem in updateListValues:
            if cursorItem[0] == updateListItem[0]:
                if str(cursorItem[2]).upper() == 'TRUE':    # Don't make any changes if true.
                    AddMessage('The accident point with Acc_Key: ' + str(cursorItem[0]) + ' is already offset.')
                else:                                       # Otherwise, offset the point.
                    editableCursorItem = list(cursorItem)
                    #AddMessage('Found a matching cursorItem with an Accident_Key of ' + str(cursorItem[0]) + ".")
                    editableCursorItem[1] = (updateListItem[1], updateListItem[2])
                    editableCursorItem[2] = updateListItem[3]
                    #AddMessage(str(editableCursorItem))
                    accidentUpdateCursor.updateRow(editableCursorItem)
                    
            else:
                pass
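
# A hedged sketch of the kind of direction test that findTheMostInterestingRow (called
# above but not shown in this listing) might perform. It assumes each candidate item is
# shaped like the singlePartListItem built above --
# [(offsetX, offsetY), intersectionX, intersectionY, accidentKey, direction, objectId] --
# and that directions are the cardinal/intercardinal strings N, NE, E, SE, S, SW, W, NW.
# The real implementation may differ.
import math

DIRECTION_AZIMUTHS = {'N': 0.0, 'NE': 45.0, 'E': 90.0, 'SE': 135.0,
                      'S': 180.0, 'SW': 225.0, 'W': 270.0, 'NW': 315.0}

def findTheMostInterestingRowSketch(candidateItems, directionToTest, maxDegreesDifference):
    bestItem = [-1, -1, -1, "False"]
    smallestDifference = None
    targetAzimuth = DIRECTION_AZIMUTHS.get(directionToTest)
    if targetAzimuth is None:
        return bestItem
    for item in candidateItems:
        offsetX, offsetY = item[0]
        deltaX = offsetX - item[1]
        deltaY = offsetY - item[2]
        # Bearing from the intersection to the candidate, clockwise from North, in degrees.
        candidateAzimuth = math.degrees(math.atan2(deltaX, deltaY)) % 360.0
        difference = abs(candidateAzimuth - targetAzimuth)
        if difference > 180.0:
            difference = 360.0 - difference
        # A maxDegreesDifference of -1 means the direction check is skipped.
        if maxDegreesDifference != -1 and difference > maxDegreesDifference:
            continue
        if smallestDifference is None or difference < smallestDifference:
            smallestDifference = difference
            bestItem = [item[3], offsetX, offsetY, "True"]
    return bestItem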
Ejemplo n.º 23
0
    def recalculate_mileposts(self):
        arcpy.AddMessage("recalculating mileposts")

        routes = self._get_unique_routes()

        #: Identity_analysis creates multipart features. Bust them up.
        arcpy.MultipartToSinglepart_management(
            self._output._intermediate_identity_city_and_county,
            self._output._intermediate_singlepart_data)

        #: Intermediate step to recalculate milepost values. Need the from/to values of the road as points.
        arcpy.FeatureVerticesToPoints_management(
            self._output._intermediate_singlepart_data,
            self._output._intermediate_feature_to_vertices, "BOTH_ENDS")
        routesCompleted = 0
        totalRoutes = len(routes)
        for route in routes:
            #: Limit LocateFeaturesAlongRoutes with a definition query
            #: Creates table with new milepost values
            if routesCompleted % 10 == 0:
                arcpy.AddMessage("route recalculations remaining: {}".format(
                    totalRoutes - routesCompleted))
            route_with_direction = route
            route = route[:4]

            arcpy.MakeFeatureLayer_management(
                self._input.lrs_routes, "definitionQueryRoute{}".format(route),
                """{} = '{}'""".format(
                    arcpy.AddFieldDelimiters(self._input.lrs_routes,
                                             'RT_NAME'), route))

            self.delete_if_exists(
                [self._output._intermediate_mileposts_along_route])

            arcpy.LocateFeaturesAlongRoutes_lr(
                in_features=self._output._intermediate_feature_to_vertices,
                in_routes="definitionQueryRoute{}".format(route),
                route_id_field="RT_NAME",
                radius_or_tolerance="50 Meter",
                out_table=self._output._intermediate_mileposts_along_route,
                out_event_properties="RID POINT MEAS",
                route_locations="FIRST",
                distance_field=False,
                zero_length_events="ZERO",
                in_fields="FIELDS",
                m_direction_offsetting=True)

            new_mileposts = self._get_new_milepost_values(route_with_direction)

            where_clause = """{} = '{}'""".format(
                arcpy.AddFieldDelimiters(
                    self._output._intermediate_singlepart_data,
                    self._fields.route_name), route_with_direction)
            with UpdateCursor(
                    self._output._intermediate_singlepart_data,
                ("OID@", self._fields.from_milepost, self._fields.to_milepost),
                    where_clause) as cursor:
                for row in cursor:
                    original_feature_id = row[0]

                    if original_feature_id not in new_mileposts:
                        print "objectid: {} was not found along LRS Routes. Data mismatch?".format(
                            original_feature_id)
                        continue

                    mileposts = sorted(new_mileposts[original_feature_id])

                    if len(mileposts) != 2:
                        raise Exception(
                            "Road segment with id {} does not fall within a 50 meter diameter of LRS data. Fix data or update radius_or_tolerance value."
                            .format(row[0]))

                    if mileposts[0] > mileposts[1]:
                        print "objectid: {} has to milepost smaller than from milepost. Data issue?".format(
                            original_feature_id)

                    row[1] = mileposts[0]
                    row[2] = mileposts[1]

                    cursor.updateRow(row)
            routesCompleted += 1
        return routes
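
# A hedged sketch of what a helper like _get_new_milepost_values might do: read the event
# table written by LocateFeaturesAlongRoutes_lr and group the measure values by the id of
# the original road segment. It assumes the vertex points carry the ORIG_FID field added by
# FeatureVerticesToPoints_management and that the measure field is named MEAS, per the
# out_event_properties used above; the real helper (which also receives the route name) is
# not shown in this listing and may differ.
from arcpy.da import SearchCursor

def get_new_milepost_values_sketch(mileposts_along_route_table):
    new_mileposts = {}
    with SearchCursor(mileposts_along_route_table, ["ORIG_FID", "MEAS"]) as cursor:
        for orig_fid, measure in cursor:
            new_mileposts.setdefault(orig_fid, []).append(measure)
    return new_mileposts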
def OffsetDirectionMatrix2(offsetOptions):
    """Update the accidentDataWithOffsetOutput geometry with data from geocodedFeatures.
    
    Keyword arguments to be included in the options class:
    gdbLocation -- The gdb where the outputWithOffsetLocations feature class resides.
    accidentDataAtIntersections -- A point feature class containing geocoded accident information.
    accidentDataWithOffsetOutput -- A point feature class with the same structure as the
        geocodedFeatures AND an "isOffset" field of type "TEXT" with a length of at least 5.
    whereClauseInUse -- Whether or not the script will use a where clause. Boolean value.
    roadsFeaturesLocation -- The path to the local roads centerline feature class.
    aliasTable -- The path to the roads alias table for the roads centerline feature class.
    maxDegreesDifference -- The number of degrees that a potentially matching accident
        offset location can be from the direction specified. If this is set to -1, the check
        will be skipped and no matching accident offset locations will be rejected, even if
        they are in the opposite direction from where the accident record says they should
        be. I.e., when this check is skipped, the accident could be offset to the North even
        though the accident record says it should be South of the intersection.
    XYFieldList -- The list of fields to use from the copy of the geocoded accidents feature
        class after that copy has had POINT_X and POINT_Y fields added and calculated.
    """
    
    ###########################################################################
    ## Function overview:
    ## For each row in the feature class of accidents that have been geolocated
    ## to an intersection:
    ###########################################################################
    # Make sure that the Status for the point is not 'U' -- Unlocated.
    # The test for 'U' points might already be handled before getting to this
    # step in the process, but if not, be sure to test for it here.
    # Create/calculate intersection X & Y field named POINT_X and POINT_Y.
    # Then, calculate those fields.
    # Then, create a buffer.
    # Then, select the On_Road in the roads layer.
    # Then, intersect the buffer with the roads layer to create an offset
    # points layer.
    # Then, split the offset points from potential multipart points to
    # singlepart points.
    ###########################################################################
    # Then, use the "SHAPE@XY" token to access the X & Y of the individual
    # offset points and compare them to the X & Y values in the POINT_X and
    # POINT_Y fields, which hold the values for the related roads' intersection
    # that the accidents were geolocated to.
    # Then, test the intersected points to find the best one for the given
    # direction.
    ###########################################################################
    # Then, append the information for that point into a list.
    # Then, delete the buffer and intersection layer.
    # Repeat for each other row...
    ###########################################################################
    # When all the rows are finished,
    # Append the attribute information for the
    # related accident into each offset point's row.
    # Lastly, write the data for all the offset point rows
    # into the output layer.
    ###########################################################################
    # Maximum angle difference code confirmed to be working. -- 2015-03-18
    # 771/771 manually checked look good (for the information given) using
    # UpdateKdotNameInCenterline(), Where Clause for selection, and
    # Maximum Angle Difference.
    # Locates 771/862 non-'U' points without the modified versions of 
    # ON_ROAD_NAME/AT_ROAD/AT_ROAD_DIRECTION/AT_ROAD_DIST_FEET labeled fields
    # and 803/862 with them. 
    ###########################################################################
    
    AddMessage("The value of the useKDOTFields option is: " + str(offsetOptions.useKDOTFields))
    
    roadsToIntersect = offsetOptions.roadsFeaturesLocation
    roadsAliasTable = offsetOptions.aliasTable
    geocodedFeatures = offsetOptions.accidentDataAtIntersections
    outputWithOffsetLocations = offsetOptions.accidentDataWithOffsetOutput
    whereClauseFlag = offsetOptions.whereClauseInUse
    maximumDegreesDifference = offsetOptions.maxDegreesDifference
    KDOTFieldUse = offsetOptions.useKDOTFields
    
    AddMessage("The value for KDOTFieldUse is:" + str(KDOTFieldUse))
    
    if str(KDOTFieldUse).lower() == 'false':
        featuresWithXYFieldList = offsetOptions.NonKDOTXYFieldList
        AddMessage("Using nonKDOTXYFieldList.")
    else:
        featuresWithXYFieldList = offsetOptions.KDOTXYFieldList
    
    geodatabaseLocation = getGDBLocationFromFC(outputWithOffsetLocations)
    
    env.workspace = geodatabaseLocation
    env.overwriteOutput = True
    geocodedWhereClause = "STATUS <> 'U'"
    featuresWithXY = 'geocodedWithXY'
    geocodedLocXY = r'in_memory\geocodedFeatures_Loc_XY' # Changed this to an in_memory location also.
    
    # Scratch data locations
    intermediateAccidentBuffer = r'in_memory\intermediateAccidentBuffer'
    intermediateAccidentIntersect = r'in_memory\intermediateAccidentIntersect'
    intermediateAccidentIntersectSinglePart = r'in_memory\intermediateAccidentIntersectSinglePart'
    
    descSpatialReference = Describe(geocodedFeatures).spatialReference
    
    # Make a feature layer of geocodedFeatures using a where clause to restrict to those points
    # which have been located to an intersection, then add XY to it.
    MakeFeatureLayer_management(geocodedFeatures, featuresWithXY, geocodedWhereClause)
    CopyFeatures_management(featuresWithXY, geocodedLocXY)
    AddXY_management(geocodedLocXY)
    
    roadsAsFeatureLayer = 'NonStateRoadsFeatureLayer'
    
    # Check if the KDOT_ROUTENAME field was already added to the roads table.
    # If not, add it.
    # Then update the field using the best route name alias.
    
    UpdateKdotNameInCenterline(roadsToIntersect, roadsAliasTable)
    
    MakeFeatureLayer_management(roadsToIntersect, roadsAsFeatureLayer)
    
    # Use Point_X & Point_Y for the geolocated intersection location.
    # Use shape tokens for the x & y of the points which
    # result from intersecting the buffer & road geometries.    
    
    geocodedAccidentsList = list()
    singlePartOffsetAccidentsList = list()
    
    print "The path of the geocodedFeatures used is: " + geocodedFeatures
    
    #AddMessage("The field names used in the search cursor are:")
    #for fieldListItem in featuresWithXYFieldList:
    #    AddMessage(fieldListItem)
    
    accidentsCursor = SearchCursor(geocodedLocXY, featuresWithXYFieldList)
    
    for accidentRow in accidentsCursor:
        geocodedAccidentsList.append(accidentRow)
    
    try:
        del accidentsCursor
    except:
        pass
    
    for geocodedAccident in geocodedAccidentsList:
        
        # Create a point here with the x & y from the geocodedAccident,
        # add the coordinate system, OBJECTID, and AccidentID
        # from the geocodedAccident layer.
        # Then, create a buffer with it.
        
        #if geocodedAccident[2] is not None and geocodedAccident[3] is not None:
        tempPoint = Point(geocodedAccident[2], geocodedAccident[3])
        #print "\t " + str(tempPoint.X) + ", " + str(tempPoint.Y)
        tempPointGeometry = PointGeometry(tempPoint, descSpatialReference)
        accidentDistanceOffset = geocodedAccident[7]
        accidentClusterTolerance = 2
        
        
        ###########################################################################
        ## TODO: Add alternate names to a copy of the road centerline features
        ## and allow the where clause to select those alternate names as well
        ## i.e.:
        ##
        ##          streetWhereClause = (""" "RD" = '""" + firstRoadName  + """'""" + """ OR """ +
        ##                                  """ "RD_ALT_NAME_1" = '""" + firstRoadName  + """'""" + """ OR """ +
        ##                                  """ "RD_ALT_NAME_2" = '""" + firstRoadName  + """'""" + """ OR """ +
        ##                                  """ "KDOT_ROUTENAME" = '""" + firstRoadName + """'""" + """ OR """ +
        ##                                  """ "RD" = '""" + secondRoadName  + """'""" + """ OR """
        ##                                  """ "RD_ALT_NAME_1" = '""" + secondRoadName  + """'""" + """ OR """ +
        ##                                  """ "RD_ALT_NAME_2" = '""" + secondRoadName  + """'""" + """ OR """ +
        ##                                  """ "KDOT_ROUTENAME" = '""" + secondRoadName + """'""")                        
        ##
        ## Or similar. Get the alternate names for each county from the StreetsCounty_Int
        ## dataset, or a better one, if you can find one.
        ###########################################################################
        
        if whereClauseFlag == True:
            try:
                #####################
                # Offsetting while using a WhereClause follows:
                #####################
                if accidentDistanceOffset is not None: # In Python it's None, whereas in an ArcGIS table it's <null>
                    
                    accidentDistanceOffset = int(accidentDistanceOffset)
                    
                    if accidentDistanceOffset != 0:
                        
                        Buffer_analysis(tempPointGeometry, intermediateAccidentBuffer, accidentDistanceOffset)
                        
                        firstRoadName = str(geocodedAccident[5])
                        firstRoadName = firstRoadName.upper()
                        secondRoadName = str(geocodedAccident[8])
                        secondRoadName = secondRoadName.upper()
                        
                        thirdRoadName = ParseMatchAddr(geocodedAccident[9])
                        thirdRoadName = thirdRoadName.upper()
                        
                        # Going to need to get the first road from the Arc_Street field and make it
                        # the third road name option. Split based on | and use the first string.
                        # Make a function to parse the Match_Addr field to the matched On_Road name.
                        # It needs to separate on %, then remove anything like "W ", " AVE", etc.
                        # (a minimal sketch of such a helper follows this example).
                        
                        # Discovered a bug, missing "+" at the end of the 3rd line.
                        # Corrected 2015-07-20
                        streetWhereClause = (""" "RD" = '""" + firstRoadName  + """'""" + """ OR """ +
                                                """ "KDOT_ROUTENAME" = '""" + firstRoadName + """'""" + """ OR """ +
                                                """ "RD" = '""" + secondRoadName  + """'""" + """ OR """ +
                                                """ "KDOT_ROUTENAME" = '""" + secondRoadName + """'""" + """ OR """ +
                                                """ "RD" = '""" + thirdRoadName  + """'""" + """ OR """ +
                                                """ "KDOT_ROUTENAME" = '""" + thirdRoadName + """'""")
                        
                        SelectLayerByAttribute_management(roadsAsFeatureLayer, "NEW_SELECTION", streetWhereClause)
                        
                        selectionCount = int(GetCount_management(roadsAsFeatureLayer).getOutput(0))
                        
                        if selectionCount != 0:
                            featuresToIntersect = [roadsAsFeatureLayer, intermediateAccidentBuffer]
                            Intersect_analysis(featuresToIntersect, intermediateAccidentIntersect, "ALL", accidentClusterTolerance, "POINT")
                            
                            if int(GetCount_management(intermediateAccidentIntersect).getOutput(0)) > 0:
                                MultipartToSinglepart_management(intermediateAccidentIntersect, intermediateAccidentIntersectSinglePart)
                                
                                singlePartsCursor = SearchCursor(intermediateAccidentIntersectSinglePart, ['SHAPE@XY'])
                                for singlePart in singlePartsCursor:
                                    singlePartListItem = [singlePart[0], geocodedAccident[2], geocodedAccident[3], geocodedAccident[4],
                                                               geocodedAccident[6], geocodedAccident[0]]
                                    singlePartOffsetAccidentsList.append(singlePartListItem)
                                
                                try:
                                    del singlePartsCursor
                                except:
                                    pass
                            else:
                                pass
                        else:
                            pass
                            #print 'Zero road segments selected. Will not attempt to offset.'
                    else:
                        pass
                        #print 'AT_ROAD_DIST_FEET is 0. Will not attempt to offset.'
                else:
                    pass
                    #print 'AT_ROAD_DIST_FEET is null. Will not attempt to offset.'
            except:
                print "WARNING:"
                print "An error occurred which prevented the accident point with Acc_Key: " + str(geocodedAccident[4])
                print "from being buffered and/or offset properly."
        
        elif whereClauseFlag == False:
            try:
                #####################
                # Offsetting while not using a WhereClause follows:
                #####################
                
                if accidentDistanceOffset is not None:
                    if int(accidentDistanceOffset) != 0:
                        accidentDistanceOffset = int(accidentDistanceOffset)
                        
                        Buffer_analysis(tempPointGeometry, intermediateAccidentBuffer, accidentDistanceOffset)
                        
                        featuresToIntersect = [roadsAsFeatureLayer, intermediateAccidentBuffer]
                        Intersect_analysis(featuresToIntersect, intermediateAccidentIntersect, "ALL", accidentClusterTolerance, "POINT")
                        if int(GetCount_management(intermediateAccidentIntersect).getOutput(0)) > 0:
                            MultipartToSinglepart_management(intermediateAccidentIntersect, intermediateAccidentIntersectSinglePart)
                            
                            singlePartsCursor = SearchCursor(intermediateAccidentIntersectSinglePart, ['SHAPE@XY'])
                            for singlePart in singlePartsCursor:
                                singlePartListItem = [singlePart[0], geocodedAccident[2], geocodedAccident[3], geocodedAccident[4],
                                                           geocodedAccident[6], geocodedAccident[0]]
                                singlePartOffsetAccidentsList.append(singlePartListItem)
                            
                            try:
                                del singlePartsCursor
                            except:
                                pass
                        else:
                            pass
                    else:
                        pass
                        # Need to change this to being offset to the intersection, i.e. no movement, but
                        # considered to be correctly offset all the same.
                        #print 'AT_ROAD_DIST_FEET is 0. Will not attempt to offset.' 
                else:
                    pass
                    #print 'AT_ROAD_DIST_FEET is null. Will not attempt to offset.'
            except:
                print "WARNING:"
                print "An error occurred which prevented the accident point with Acc_Key: " + str(geocodedAccident[4])
                print "from being buffered and/or offset properly."
        
        else:
            pass
            #print 'Please set the whereClauseFlag to either (boolean) True or False.'
    
    offsetDictionaryByAccidentKey = dict()
    listContainer = list()
    
    # Group the rows by accident_key for further analysis,
    # and add them to the dictionary/list/list data structure.
    
    for singlePartOffsetItem in singlePartOffsetAccidentsList:
        if singlePartOffsetItem[3] in offsetDictionaryByAccidentKey.keys():
            listContainer = offsetDictionaryByAccidentKey[singlePartOffsetItem[3]]
            listContainer.append(singlePartOffsetItem)
            offsetDictionaryByAccidentKey[singlePartOffsetItem[3]] = listContainer
        else:
            listContainer = list()
            listContainer.append(singlePartOffsetItem)
            offsetDictionaryByAccidentKey[singlePartOffsetItem[3]] = listContainer
    
    updateListValues = list()
    
    for accidentKey in offsetDictionaryByAccidentKey.keys():
        # accidentKey will be a unique accident key from the table
        listContainer = offsetDictionaryByAccidentKey[accidentKey]
        updateList = [-1, -1, -1, "False"]
        try:
            directionToTest = listContainer[0][4] # Get the AT_ROAD_KDOT_DIRECTION from the first entry.
            if directionToTest is not None:
                directionToTest = str(directionToTest).upper()
                updateList = findTheMostInterestingRow(listContainer, directionToTest, maximumDegreesDifference)
                updateListValues.append(updateList)
            else:
                print 'Direction to test is null.'
        except:
            pass
    
    accidentUpdateCursorFields = ['ACCIDENT_KEY', 'Shape@XY', 'isOffset']
    
    accidentUpdateCursor = UpdateCursor(outputWithOffsetLocations, accidentUpdateCursorFields)
    for cursorItem in accidentUpdateCursor:
        for updateListItem in updateListValues:
            if cursorItem[0] == updateListItem[0]:
                if str(cursorItem[2]).upper() == 'TRUE':    # Don't make any changes if true.
                    AddMessage('The accident point with Acc_Key: ' + str(cursorItem[0]) + ' is already offset.')
                else:                                       # Otherwise, offset the point.
                    editableCursorItem = list(cursorItem)
                    #AddMessage('Found a matching cursorItem with an Accident_Key of ' + str(cursorItem[0]) + ".")
                    editableCursorItem[1] = (updateListItem[1], updateListItem[2])
                    editableCursorItem[2] = updateListItem[3]
                    #AddMessage(str(editableCursorItem))
                    accidentUpdateCursor.updateRow(editableCursorItem)
                    
            else:
                pass # This will always happen when updateListItem has a value of -1 in the 0th position, since that is not a valid Acc_Key.
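
# The ParseMatchAddr helper used above is described only in the planning comments
# (take the matched address, keep the first road, strip directionals and street-type
# abbreviations). A minimal sketch of such a helper follows; the "," and "&" separators
# and the abbreviation lists are illustrative assumptions, not the original implementation.
import re

def ParseMatchAddrSketch(matchAddr):
    if matchAddr is None:
        return ""
    # Keep the street portion, then the first road of the intersection.
    firstRoad = matchAddr.split(",")[0].split("&")[0].strip()
    # Strip a leading directional and a trailing street-type abbreviation.
    firstRoad = re.sub(r'^(N|S|E|W)\s+', '', firstRoad, flags=re.IGNORECASE)
    firstRoad = re.sub(r'\s+(AVE|ST|RD|DR|BLVD|HWY)$', '', firstRoad, flags=re.IGNORECASE)
    return firstRoad.upper()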
Ejemplo n.º 25
0
def main():
    # tool inputs
    INPUT_NETWORK = argv[1]
    INPUT_POINTS = argv[2]
    INPUT_ORIGINS_FIELD = argv[3]
    INPUT_DESTINATIONS_FIELD = argv[4]
    INPUT_COEFF = float(argv[5])
    INPUT_SEARCH_RADIUS = float(argv[6]) if is_number(
        argv[6]) else float('inf')
    INPUT_OUTPUT_DIRECTORY = argv[7]
    INPUT_OUTPUT_FEATURE_CLASS_NAME = argv[8]
    INPUT_COMPUTE_WAYFINDING = argv[9] == "true"
    INPUT_VISUALIZATION = argv[10]

    # check that network has "Length" attribute
    if "Length" not in network_cost_attributes(INPUT_NETWORK):
        AddError("Network <%s> does not have Length attribute" % INPUT_NETWORK)
        return

    # check that coeff is at least 1
    if INPUT_COEFF < 1:
        AddError("Redundancy coefficient <%s> must be at least 1" %
                 INPUT_COEFF)
        return

    # extract origin and destination ids
    origin_ids = flagged_points(INPUT_POINTS, INPUT_ORIGINS_FIELD)
    if len(origin_ids) != 1:
        AddError("Number of origins <%s> must be 1" % len(origin_ids))
        return
    origin_id = origin_ids[0]
    destination_ids = flagged_points(INPUT_POINTS, INPUT_DESTINATIONS_FIELD)
    if len(destination_ids) == 0 or origin_ids == destination_ids:
        AddWarning("No OD pair found, no computation will be done")
        return

    # check that the output file does not already exist
    output_feature_class = "%s.shp" % join(INPUT_OUTPUT_DIRECTORY,
                                           INPUT_OUTPUT_FEATURE_CLASS_NAME)
    if Exists(output_feature_class):
        AddError("Output feature class <%s> already exists" %
                 output_feature_class)
        return

    # obtain visualization method
    visualize_segments = visualize_polylines = False
    if INPUT_VISUALIZATION == "Unique Segments":
        visualize_segments = True
    elif INPUT_VISUALIZATION == "Path Polylines":
        visualize_polylines = True
    elif INPUT_VISUALIZATION != "None":
        AddError("Visualization method <%s> must be one of 'Unique Segments', "
                 "'Path Polylines', or 'None'" % INPUT_VISUALIZATION)
        return

    # setup
    env.overwriteOutput = True

    # construct network and points
    network, points, edge_to_points = construct_network_and_load_buildings(
        INPUT_POINTS, INPUT_NETWORK)

    # find redundant paths for each origin-destination
    AddMessage("Computing redundant paths ...")
    progress_bar = Progress_Bar(len(destination_ids), 1, "Finding paths ...")
    # build output table one row at a time, starting from header row
    answers = [["OrigID", "DestID", "NumPaths", "Redundancy"]]
    if INPUT_COMPUTE_WAYFINDING:
        answers[0].append("Wayfinding")
    # visualization state
    if visualize_polylines:
        polylines = []
        polyline_data = []
    elif visualize_segments:
        all_unique_segment_counts = defaultdict(int)
    for destination_id in destination_ids:
        if origin_id != destination_id:
            all_paths = find_all_paths(network, points, INPUT_COEFF, origin_id,
                                       destination_id, INPUT_SEARCH_RADIUS,
                                       INPUT_COMPUTE_WAYFINDING)
            if all_paths is not None:
                if INPUT_COMPUTE_WAYFINDING:
                    (all_path_points, unique_segment_counts, num_paths,
                     redundancy, waypoint) = all_paths
                    answers.append([
                        origin_id, destination_id, num_paths, redundancy,
                        waypoint
                    ])
                else:
                    (all_path_points, unique_segment_counts, num_paths,
                     redundancy) = all_paths
                    answers.append(
                        [origin_id, destination_id, num_paths, redundancy])
                if visualize_polylines:
                    for i, path_points in enumerate(all_path_points):
                        polylines.append(
                            Polyline(
                                Array([
                                    Point(*coords) for coords in path_points
                                ])))
                        polyline_data.append((origin_id, destination_id, i))
                elif visualize_segments:
                    for edge_id in unique_segment_counts:
                        all_unique_segment_counts[
                            edge_id] += unique_segment_counts[edge_id]
        progress_bar.step()
    AddMessage("\tDone.")

    # write out results
    if len(answers) > 1:
        AddMessage("Writing out results ...")
        # write out to a table
        write_rows_to_csv(answers, INPUT_OUTPUT_DIRECTORY,
                          INPUT_OUTPUT_FEATURE_CLASS_NAME)
        # visualize
        if visualize_polylines:
            CopyFeatures_management(polylines, output_feature_class)
            data_fields = ["OrigID", "DestID", "PathID"]
            for field in data_fields:
                AddField_management(in_table=output_feature_class,
                                    field_name=field,
                                    field_type="INTEGER")
            rows = UpdateCursor(output_feature_class, data_fields)
            for j, row in enumerate(rows):
                row[0], row[1], row[2] = polyline_data[j]
                rows.updateRow(row)
            # create a layer of the polylines shapefile and symbolize
            polylines_layer_name = "%s_layer" % INPUT_OUTPUT_FEATURE_CLASS_NAME
            polylines_layer = "%s.lyr" % join(INPUT_OUTPUT_DIRECTORY,
                                              INPUT_OUTPUT_FEATURE_CLASS_NAME)
            MakeFeatureLayer_management(output_feature_class,
                                        polylines_layer_name)
            SaveToLayerFile_management(polylines_layer_name, polylines_layer,
                                       "ABSOLUTE")
            ApplySymbologyFromLayer_management(
                polylines_layer,
                join(path[0],
                     r"Symbology_Layers\sample_polylines_symbology.lyr"))
            add_layer_to_display(polylines_layer)
        elif visualize_segments:
            id_mapping, edges_file = select_edges_from_network(
                INPUT_NETWORK, all_unique_segment_counts.keys(),
                INPUT_OUTPUT_DIRECTORY,
                "%s_edges" % INPUT_OUTPUT_FEATURE_CLASS_NAME)
            AddField_management(in_table=edges_file,
                                field_name="PathCount",
                                field_type="INTEGER")
            rows = UpdateCursor(edges_file, ["OID@", "PathCount"])
            for row in rows:
                row[1] = all_unique_segment_counts[id_mapping[row[0]]]
                rows.updateRow(row)
        AddMessage("\tDone.")
    else:
        AddMessage("No results to write out.")
Ejemplo n.º 26
0
def UpdateKdotNameInCenterline(centerlineToIntersect, centerlineAliasTable):
    ###############################################################################
    # Create a list here for output and then use logic on the dictionary to decide
    # what value you want the KDOT_ROUTENAME in the centerline feature class to have.
    # Then, use an update cursor to match the SEGID with the value to update.
    ###############################################################################
    
    # Need to check to see if the centerlineToIntersect has a field that already
    # exists for the KDOT_ROUTENAME, and if not, create one.
    
    # Create a list of fields using the ListFields function
    fieldsList = ListFields(centerlineToIntersect)
    
    fieldNamesList = list()
    
    # Iterate through the list of fields
    for field in fieldsList:
        fieldNamesList.append(field.name)
    
    # If the KDOT_ROUTENAME field is not found,
    # add it with adequate parameters.
    if "KDOT_ROUTENAME" not in fieldNamesList:
        #AddMessage("Adding KDOT_ROUTENAME to " + centerlineToIntersect + ".")
        #previousWorkspace = env.workspace  # @UndefinedVariable
        addFieldWorkspace = getGDBLocationFromFC(centerlineToIntersect)
        env.workspace = addFieldWorkspace
        
        fieldNameToAdd = "KDOT_ROUTENAME"
        fieldLength = 10
        
        AddField_management(centerlineToIntersect, fieldNameToAdd, "TEXT", "", "", fieldLength)
        
        # Set the workspace back to what it was previously to prevent
        # problems from occurring in the rest of the script.
        #env.workspace = previousWorkspace
        AddMessage("The " + str(fieldNameToAdd) + " field was added to " + str(centerlineToIntersect) + ".")
    
    else:
        AddMessage("The KDOT_ROUTENAME field already exists within " + centerlineToIntersect + ".")
        AddMessage("It will not be added again, but its values will be updated (where necessary).")
    
    aliasFields = ['SEGID', 'KDOT_ROUTENAME']
    
    #for fieldNameItem in fieldNamesList:
        #print fieldNameItem
    
    aliasCursor = SearchCursor(centerlineAliasTable, aliasFields)
    
    aliasList = list()
    
    for aliasRow in aliasCursor:
        if aliasRow[1] is not None:
            aliasList.append(aliasRow)
        else:
            pass
    try:
        del aliasCursor
    except:
        pass
    
    aliasDictionary = dict()
    
    for aliasListItem in aliasList:
        if aliasListItem[0] in aliasDictionary.keys():
            listContainer = aliasDictionary[aliasListItem[0]]
            listContainer.append(aliasListItem)
            aliasDictionary[aliasListItem[0]] = listContainer
        else:
            listContainer = list()
            listContainer.append(aliasListItem)
            aliasDictionary[aliasListItem[0]] = listContainer
    
    aliasListForUpdate = list()
    
    for aliasDictKey in aliasDictionary.keys():
        listContainer = aliasDictionary[aliasDictKey]
        bestRouteName = ''
        for listContainerItem in listContainer:
            currentRouteName = listContainerItem[1]
            # Logic to decide route to use based on route dominance is in
            # the compareRouteNames function.
            bestRouteName = compareRouteNames(bestRouteName, currentRouteName)
            
        aliasListForUpdate.append((aliasDictKey, bestRouteName))
    
    # Have to start an edit session because the feature class participates in a topology.
    try:
        editWorkspace = getGDBLocationFromFC(centerlineToIntersect)
                
        editSession = Editor(editWorkspace)
        
        editSession.startEditing(False, False)
        
        editSession.startOperation()
        
        routeToUpdateCursor = UpdateCursor(centerlineToIntersect, aliasFields)
        
        for routeToUpdate in routeToUpdateCursor:
            routeToUpdate = list(routeToUpdate)
            for aliasForUpdate in aliasListForUpdate:
                if routeToUpdate[0] == aliasForUpdate[0]:
                    routeToUpdate[1] = aliasForUpdate[1]
                else:
                    pass
            
            routeToUpdateCursor.updateRow(routeToUpdate)
        
        del routeToUpdateCursor
        
        editSession.stopOperation()
        
        editSession.stopEditing(True)
    
    except ExecuteError:
        AddMessage(GetMessages(2))
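
# The SEGID grouping above can also be written with collections.defaultdict, which
# removes the keys() membership test and the listContainer reassignment. A minimal
# sketch, assuming the same centerlineAliasTable and aliasFields and the
# arcpy.da.SearchCursor already imported by this script:
from collections import defaultdict

def buildAliasDictionarySketch(centerlineAliasTable, aliasFields):
    aliasDictionary = defaultdict(list)
    with SearchCursor(centerlineAliasTable, aliasFields) as aliasCursor:
        for segId, routeName in aliasCursor:
            # Only keep rows that actually carry a route name.
            if routeName is not None:
                aliasDictionary[segId].append(routeName)
    return aliasDictionary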
Ejemplo n.º 27
0
# Name of fields 
nameField = "NAME"
parkAndRideField = "HasTwoParkAndRides"

# Name of the park-and-ride feature class
parkAndRide = "ParkAndRide"

# Variables to count the total number of cities and the number of cities with two or more park and rides
CityWithTwoParkAndRides = 0
totalCity = 0

# Try to find which cities have at least two park and rides within their boundaries
try:
    
    # Open an update cursor over the city boundaries
    with UpdateCursor(cityBoundaries, (nameField, parkAndRideField)) as cityRows:

        # For each city feature in the cityBoundaries data
        for row in cityRows:

            # Get the name of each city
            nameCity = row[0]

            # Build a query string that selects only the current city by name
            queryString = '"' + nameField + '" = ' + "'" + nameCity + "'"

            # Make a feature layer of just the current city polygon
            # The queryString restricts the layer to the current city feature
            MakeFeatureLayer_management(cityBoundaries, "CurrentCityLayer", queryString) 
            MakeFeatureLayer_management(parkAndRide, "ParkAndRide_lry")
Ejemplo n.º 28
0
    MakeFeatureLayer_management(parkAndRide,"ParkAndRide_lry")

    # Select only the cities that contain a park-and-ride point
    SelectLayerByLocation_management("CityBoundaries_lyr", "CONTAINS", "ParkAndRide_lry")

# If an error occurs
except:

    # Show the message
    print 'It was not possible to make a layer'

# Try to update the field
try:

    # Open an update cursor on the HasTwoParkAndRides field of the selected CityBoundaries features
    with UpdateCursor("CityBoundaries_lyr", (parkAndRideField,)) as cursor:

        # For each selected row
        for row in cursor:

            # Set the field to 'True' and save the change
            row[0] = "True"
            cursor.updateRow(row)

# If an error occurs
except:
    # Show the message
    print "It was not possible to update the field:", parkAndRideField, "for the selected features"

# Get the total number of cities in CityBoundaries
city = GetCount_management("CityBoundaries")
totalCity = int(city.getOutput(0))
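
# The two fragments above appear to come from the same exercise: flag each city
# polygon that contains at least two park-and-ride points. A minimal sketch of the
# combined workflow, assuming the cityBoundaries, parkAndRide, nameField and
# parkAndRideField variables defined above and arcpy's Delete_management imported
# alongside the other geoprocessing functions; the layer names and the WITHIN
# selection are assumptions, not the original solution.
CityWithTwoParkAndRides = 0

with UpdateCursor(cityBoundaries, (nameField, parkAndRideField)) as cityRows:
    for row in cityRows:
        # Layer holding only the current city polygon, plus a layer of the points.
        queryString = '"{}" = \'{}\''.format(nameField, row[0].replace("'", "''"))
        MakeFeatureLayer_management(cityBoundaries, "CurrentCityLayer", queryString)
        MakeFeatureLayer_management(parkAndRide, "ParkAndRide_lry")

        # Count the park-and-ride points that fall inside the current city.
        SelectLayerByLocation_management("ParkAndRide_lry", "WITHIN", "CurrentCityLayer")
        selectedCount = int(GetCount_management("ParkAndRide_lry").getOutput(0))

        # Record the result and keep a running total.
        row[1] = "True" if selectedCount >= 2 else "False"
        if selectedCount >= 2:
            CityWithTwoParkAndRides += 1
        cityRows.updateRow(row)

        # Remove the per-city layers before the next iteration.
        Delete_management("CurrentCityLayer")
        Delete_management("ParkAndRide_lry")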
Ejemplo n.º 29
0
geographicDataOrder = "sampaOrder"

# Setting the name of fields
whiteField = "white"
idField = "CD_GEOCODI"

# Setting the demographic data (csv)
demographicData = open("C:\\demographicMaps\\tabelas\\Pessoa03_SP1.csv")
demographicCsv = csv.reader(demographicData, delimiter=";")

# Skip the header row and sort the csv by the area code
demographicCsv.next()
demographicSorted = sorted(demographicCsv, key=operator.itemgetter(0))

# Update the geographic data with the matching values from the csv
with UpdateCursor(geographicDataOrder + ".shp",
                  (whiteField, idField)) as geographicRows:

    # Walk each row of the geographic data against each row of the csv
    for geographicRow in geographicRows:
        for demographicRow in demographicSorted:

            # Check whether the area code in the geographic data equals the area code in the demographic data
            if str(geographicRow[1]) == str(demographicRow[0]):

                # Write the csv value into the geographic data
                # (column 3 holds the count of white residents in the census area)
                idDemographicRow = str(demographicRow[3])
                geographicRow[0] = idDemographicRow

                # Save the changes
                geographicRows.updateRow(geographicRow)
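
# The nested loop above rescans the whole csv for every census polygon. Building a
# lookup dictionary keyed on the area code first keeps the join linear. A minimal
# sketch, assuming the same demographicSorted rows and column layout (area code in
# column 0, white-population count in column 3):
demographicByCode = {row[0]: row[3] for row in demographicSorted}

with UpdateCursor(geographicDataOrder + ".shp",
                  (whiteField, idField)) as geographicRows:
    for geographicRow in geographicRows:
        code = str(geographicRow[1])
        if code in demographicByCode:
            geographicRow[0] = str(demographicByCode[code])
            geographicRows.updateRow(geographicRow)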
Ejemplo n.º 30
0
def get_USGS_metadata(usgs_fc):
    """
    Access the USGS site information REST API to get the basin area
    for all applicable sites. Adds the basinarea field to the FC and
    writes the data returned from the REST service.

    Required: usgs_fc -- the feature class of records from the AWDB

    Returns:  None
    """

    import urllib.request
    import urllib.parse
    import gzip
    from re import search
    from arcpy import ListFields, AddField_management
    from arcpy.da import SearchCursor, UpdateCursor
    import io

    # check for area field and add if missing
    fields = ListFields(usgs_fc)

    for fieldname, datatype in NEW_FIELDS:
        for field in fields:
            if field.name == fieldname:
                break
        else:
            AddField_management(usgs_fc, fieldname, datatype)

    # get a list of station IDs in the FC
    stationIDs = []
    with SearchCursor(usgs_fc, STATION_ID_FIELD) as cursor:
        for row in cursor:
            sid = row[0].split(":")[0]
            # valid USGS station IDs are between 8 and 15 char and are numerical
            if 8 <= len(sid) <= 15 and not search('[a-zA-Z]', sid):
                stationIDs.append(sid)

    # setup and get the HTTP request
    request = urllib.request.Request(
        settings.USGS_URL,
        urllib.parse.urlencode({
            "format": "rdb",  # get the data in USGS rdb format
            "sites": ",".join(stationIDs),  # the site IDs to get, separated by commas
            "siteOutput": "expanded"  # expanded output includes basin area
            #"modifiedSince": "P" + str(SCRIPT_FREQUENCY) + "D"  # only get records modified since last run
        }).encode('utf-8')
    )

    # allow gzipped response
    request.add_header('Accept-encoding', 'gzip')
    response = urllib.request.urlopen(request)

    # check to see if response is gzipped and decompress if yes
    if response.info().get('Content-Encoding') == 'gzip':
        buf = io.BytesIO(response.read())
        data = gzip.GzipFile(fileobj=buf)
    else:
        data = response

    # parse the response and create a dictionary keyed on the station ID
    stationAreas = {}
    for line in data.readlines():
        line = line.decode('utf-8')
        if line.startswith('USGS'):
            # data elements in line (station record) are separated by tabs
            line = line.split('\t')
            # the 2nd element is the station ID, 3rd is the name,
            # and the 30th is the area
            # order in the tuple is important,
            # so data is entered into the correct fields in the table
            stationAreas[line[1]] = (line[29], line[1], line[2])

    # write the response data to the FC
    fieldsToAccess = [STATION_ID_FIELD]+[name for name, datatype in NEW_FIELDS]
    with UpdateCursor(usgs_fc, fieldsToAccess) as cursor:
        for row in cursor:
            stationid = row[0].split(":")[0]

            try:
                # row[1] is area
                row[1] = float(stationAreas[stationid][0])
            except KeyError:
                # in case no record was returned for ID
                # skip to next record
                continue
            except ValueError:
                # in case area returned is ""
                pass

            try:
                # row[2] is the USGS station ID
                row[2] = stationAreas[stationid][1]
            except ValueError:
                # in case ID returned is ""
                pass

            try:
                # row[3] is the USGS station name
                row[3] = stationAreas[stationid][2]
            except ValueError:
                # in case name returned is ""
                pass

            # no exception so data valid, update row
            cursor.updateRow(row)
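
# The rdb parsing above can be pulled out into a small helper that returns the
# station dictionary, which keeps the column positions it relies on (2nd field =
# station ID, 3rd = station name, 30th = drainage area) in one place. A minimal
# sketch over the same decoded response lines:
def parse_rdb_station_lines(lines):
    """Map USGS station ID -> (area, station ID, station name) from rdb rows."""
    station_areas = {}
    for line in lines:
        if line.startswith('USGS'):
            # Data elements in a station record are separated by tabs.
            parts = line.split('\t')
            station_areas[parts[1]] = (parts[29], parts[1], parts[2])
    return station_areas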
Ejemplo n.º 31
0
def main():
    # tool inputs
    INPUT_NETWORK = argv[1]
    INPUT_POINTS = argv[2]
    INPUT_ORIGINS_FIELD = argv[3]
    INPUT_DESTINATIONS_FIELD = argv[4]
    INPUT_BUILDING_WEIGHTS_FIELD = argv[5]
    INPUT_COEFF = float(argv[6])
    INPUT_SEARCH_RADIUS = float(argv[7]) if is_number(argv[7]) else float('inf')
    INPUT_OUTPUT_DIRECTORY = argv[8]
    INPUT_OUTPUT_FEATURE_CLASS_NAME = argv[9]

    # check that network has "Length" attribute
    if "Length" not in network_cost_attributes(INPUT_NETWORK):
      AddError("Network <%s> does not have Length attribute" % INPUT_NETWORK)
      return

    # check that coeff is at least 1
    if INPUT_COEFF < 1:
      AddError("Redundancy coefficient <%s> must be at least 1" % INPUT_COEFF)
      return

    # if we are given a building weights field, check that it is valid
    if INPUT_BUILDING_WEIGHTS_FIELD == "#":
      INPUT_BUILDING_WEIGHTS_FIELD = ""
    if INPUT_BUILDING_WEIGHTS_FIELD and (INPUT_BUILDING_WEIGHTS_FIELD not in
        fields(INPUT_POINTS)):
      AddError("Building weights field <%s> is not a valid attribute in the "
          "input points <%s>" % (INPUT_BUILDING_WEIGHTS_FIELD, INPUT_POINTS))
      return

    # setup
    env.overwriteOutput = True

    # copy the input points into an output feature class
    AddMessage("Copying input points to output feature class ...")
    input_points_layer = Layer(INPUT_POINTS)
    output_feature_class = "%s.shp" % join(INPUT_OUTPUT_DIRECTORY,
        INPUT_OUTPUT_FEATURE_CLASS_NAME)
    CopyFeatures_management(in_features=input_points_layer,
        out_feature_class=output_feature_class)
    AddMessage("\tDone.")

    # construct network and points
    network, points, edge_to_points = construct_network_and_load_buildings(
        INPUT_POINTS, INPUT_NETWORK, INPUT_BUILDING_WEIGHTS_FIELD)

    # extract origin and destination ids
    origin_ids = flagged_points(INPUT_POINTS, INPUT_ORIGINS_FIELD)
    destination_ids = flagged_points(INPUT_POINTS, INPUT_DESTINATIONS_FIELD)
    if len(origin_ids) == 0 or len(destination_ids) == 0 or (
        len(origin_ids) == 1 and origin_ids == destination_ids):
      AddWarning("No OD pair found, no computation will be done.")

    # compute redundancy index statistics for each origin point
    AddMessage("Computing redundancy indices ...")
    redundancy_indices = {}
    # memoize: computing index from O to D is same as computing it from D to O
    memo = {}
    for origin_id in origin_ids:
      progress_bar = Progress_Bar(len(destination_ids), 1,
          "Computing index for O=%s ..." % origin_id)
      # statistics variables
      tot_redundancy_index = 0
      tot_squared_redundancy_index = 0
      min_redundancy_index = None
      max_redundancy_index = None
      all_unique_segments = set()
      # track the number of destinations for which a numeric redundancy index is
      #     successfully computed
      n = 0
      for destination_id in destination_ids:
        if origin_id != destination_id:
          memo_key = (min(origin_id, destination_id), max(origin_id,
              destination_id))
          if memo_key not in memo:
            memo[memo_key] = find_redundancy_index(network, points,
                edge_to_points, INPUT_COEFF, origin_id, destination_id,
                INPUT_SEARCH_RADIUS, bool(INPUT_BUILDING_WEIGHTS_FIELD))
          if memo[memo_key] is not None:
            n += 1
            redundancy_pair, unique_segments_pair = memo[memo_key]
            min_redundancy_index = (min(min_redundancy_index, redundancy_pair)
                if min_redundancy_index is not None else redundancy_pair)
            max_redundancy_index = (max(max_redundancy_index, redundancy_pair)
                if max_redundancy_index is not None else redundancy_pair)
            tot_redundancy_index += redundancy_pair
            tot_squared_redundancy_index += redundancy_pair * redundancy_pair
            all_unique_segments |= unique_segments_pair
        progress_bar.step()
      if n > 0:
        avg_redundancy_index = tot_redundancy_index / n
        avg_squared_redundancy_index = tot_squared_redundancy_index / n
      else:
        avg_redundancy_index = avg_squared_redundancy_index = 0
      # TODO(mikemeko): work on std computation with better accuracy
      std = sqrt(max(avg_squared_redundancy_index - avg_redundancy_index *
          avg_redundancy_index, 0))
      if min_redundancy_index is None:
        min_redundancy_index = 0
      if max_redundancy_index is None:
        max_redundancy_index = 0
      redundancy_indices[origin_id] = (n, avg_redundancy_index, std,
          min_redundancy_index, max_redundancy_index, all_unique_segments)
    AddMessage("\tDone.")

    # write out redundancy statistics to output feature class
    # delete all points that are not origins from the output feature class
    AddMessage("Writing out results ...")
    int_fields = ["InputID", "Reach"]
    double_fields = ["AvgRedund", "StdRedund", "MinRedund", "MaxRedund"]
    for field in int_fields:
      AddField_management(in_table=output_feature_class, field_name=field,
          field_type="INTEGER")
    for field in double_fields:
      AddField_management(in_table=output_feature_class, field_name=field,
          field_type="DOUBLE")
    rows = UpdateCursor(output_feature_class,
        ["OID@"] + int_fields + double_fields)
    for row in rows:
      oid = row[0]
      if Describe(INPUT_POINTS).extension != "shp":
        # original ids start from 1, but shapefile ids start from 0, so add
        #     1 to shapefile id for correct matching
        oid += 1
      if oid in redundancy_indices:
        n, avg, std, m, M, all_unique_segments = redundancy_indices[oid]
        row[1:] = [oid, n, avg, std, m, M]
        rows.updateRow(row)
      else:
        rows.deleteRow()
    # create a layer of the output feature class, for symbology purposes
    output_layer = "%s.lyr" % join(INPUT_OUTPUT_DIRECTORY,
        INPUT_OUTPUT_FEATURE_CLASS_NAME)
    MakeFeatureLayer_management(in_features=output_feature_class,
        out_layer=INPUT_OUTPUT_FEATURE_CLASS_NAME)
    SaveToLayerFile_management(INPUT_OUTPUT_FEATURE_CLASS_NAME, output_layer,
        "ABSOLUTE")
    # add output feature layer to display after applying symbology
    ApplySymbologyFromLayer_management(output_layer, join(path[0],
        r"Symbology_Layers\sample_points_symbology.lyr"))
    add_layer_to_display(output_layer)
    # if there is only one origin, symbolize selected edges
    if _common_id(memo.keys()) and len(all_unique_segments) > 0:
      n, avg, std, m, M, all_unique_segments = redundancy_indices[origin_ids[0]]
      select_edges_from_network(INPUT_NETWORK, all_unique_segments,
          INPUT_OUTPUT_DIRECTORY, "%s_edges" % INPUT_OUTPUT_FEATURE_CLASS_NAME)
    AddMessage("\tDone.")
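
# The memo dictionary in main() treats an origin-destination pair and its reverse
# as the same computation by keying on (min(o, d), max(o, d)). A self-contained
# sketch of that symmetric memoization pattern, with a hypothetical pairwise
# function supplied by the caller:
def symmetric_memo(pair_function):
    memo = {}
    def wrapped(a, b):
        # Order-independent key, so (a, b) and (b, a) share one cached result.
        key = (min(a, b), max(a, b))
        if key not in memo:
            memo[key] = pair_function(a, b)
        return memo[key]
    return wrapped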
def UpdateKdotNameInCenterline(centerlineToIntersect, centerlineAliasTable):
    ###############################################################################
    # Create a list here for output and then use logic on the dictionary to decide
    # what value you want the KDOT_ROUTENAME in the centerline feature class to have.
    # Then, use an update cursor to match the SEGID with the value to update.
    ###############################################################################
    
    # Need to check to see if the centerlineToIntersect has a field that already
    # exists for the KDOT_ROUTENAME, and if not, create one.
    
    # Create a list of fields using the ListFields function
    fieldsList = ListFields(centerlineToIntersect)
    
    fieldNamesList = list()
    
    # Iterate through the list of fields
    for field in fieldsList:
        fieldNamesList.append(field.name)
    
    # If the KDOT_ROUTENAME field is not found,
    # add it with adequate parameters.
    if "KDOT_ROUTENAME" not in fieldNamesList:
        #AddMessage("Adding KDOT_ROUTENAME to " + centerlineToIntersect + ".")
        #previousWorkspace = env.workspace  # @UndefinedVariable
        addFieldWorkspace = getGDBLocationFromFC(centerlineToIntersect)
        env.workspace = addFieldWorkspace
        
        fieldNameToAdd = "KDOT_ROUTENAME"
        fieldLength = 10
        
        AddField_management(centerlineToIntersect, fieldNameToAdd, "TEXT", "", "", fieldLength)
        
        # Set the workspace back to what it was previously to prevent
        # problems from occurring in the rest of the script.
        #env.workspace = previousWorkspace
        AddMessage("The " + str(fieldNameToAdd) + " field was added to " + str(centerlineToIntersect) + ".")
    
    else:
        AddMessage("The KDOT_ROUTENAME field already exists within " + centerlineToIntersect + ".")
        AddMessage("It will not be added again, but its values will be updated (where necessary).")
    
    aliasFields = ['SEGID', 'KDOT_ROUTENAME']
    
    #for fieldNameItem in fieldNamesList:
        #print fieldNameItem
    
    aliasCursor = SearchCursor(centerlineAliasTable, aliasFields)
    
    aliasList = list()
    
    for aliasRow in aliasCursor:
        if aliasRow[1] is not None:
            aliasList.append(aliasRow)
        else:
            pass
    try:
        del aliasCursor
    except:
        pass
    
    aliasDictionary = dict()
    
    for aliasListItem in aliasList:
        if aliasListItem[0] in aliasDictionary.keys():
            listContainer = aliasDictionary[aliasListItem[0]]
            listContainer.append(aliasListItem)
            aliasDictionary[aliasListItem[0]] = listContainer
        else:
            listContainer = list()
            listContainer.append(aliasListItem)
            aliasDictionary[aliasListItem[0]] = listContainer
    
    aliasListForUpdate = list()
    
    for aliasDictKey in aliasDictionary.keys():
        listContainer = aliasDictionary[aliasDictKey]
        bestRouteName = ''
        for listContainerItem in listContainer:
            currentRouteName = listContainerItem[1]
            # Logic to decide route to use based on route dominance is in
            # the compareRouteNames function.
            bestRouteName = compareRouteNames(bestRouteName, currentRouteName)
            
        aliasListForUpdate.append((aliasDictKey, bestRouteName))
    
    # Have to start an edit session because the feature class participates in a topology.
    try:
        editWorkspace = getGDBLocationFromFC(centerlineToIntersect)
                
        editSession = Editor(editWorkspace)
        
        editSession.startEditing(False, False)
        
        editSession.startOperation()
        
        routeToUpdateCursor = UpdateCursor(centerlineToIntersect, aliasFields)
        
        for routeToUpdate in routeToUpdateCursor:
            routeToUpdate = list(routeToUpdate)
            for aliasForUpdate in aliasListForUpdate:
                if routeToUpdate[0] == aliasForUpdate[0]:
                    routeToUpdate[1] = aliasForUpdate[1]
                else:
                    pass
            
            routeToUpdateCursor.updateRow(routeToUpdate)
        
        del routeToUpdateCursor
        
        editSession.stopOperation()
        
        editSession.stopEditing(True)
    
    except ExecuteError:
        AddMessage(GetMessages(2))
def RoadNameRepair(optionsObject):

    # Adapted from a previous script created 2014-05-22
    # by Dirk Talley, which was called
    # Pre_Accident_Geocoding_Update_Cursor.py

    # This is an update cursor meant to clean up the road names in accident data.
    # It takes the road name information in the target feature set and reformats
    # it in the hope that the newly formatted data will have a higher match rate
    # when geocoded, without the introduction of any new errors.

    # For 2009 data in particular, it checks the information in the road database
    # and performs a check to see if there is a partial match on the 6-or-less
    # character road names with the information in the roads table for that
    # county. If it finds one, and only one, match, it changes the output road name
    # from the 6-or-less character name to the full (assumed) name.

    # If you get a "string or buffer expected" error message,
    # it is probably due to the script attempting a pattern match
    # on a None-type (<Null> in Arcmap) data entry in the table.
    # Make sure that you check the data for None-type entries.

    # The Fifth and Sixth string matching sections
    # no longer seem to take nearly as long as they
    # had previously. I ascribe this to the usage of
    # .da cursors and the "in_memory" workspace.

    try:
        # Set the environment
        env.workspace = "in_memory"

        # Set other variables
        uniqueRoadNamesTable = r"in_memory\UniqueRoadNames_Sorted"  ## Why not use the sorted table?
        uniqueRoadNamesTableFields = ["RD"]
        roadNamesList = list()
        accidentData = optionsObject.accidentDataTable

        try:
            oid_fieldname = Describe(accidentData).OIDFieldName
            print "The oid_fieldname is: " + oid_fieldname
        except:
            oid_fieldname = "ESRI_OID"

        if optionsObject.useKDOTFields == True:
            AddMessage('Checking KDOT Fields.')

            accidentCursorFields = [
                oid_fieldname, "COUNTY_NBR", "ON_ROAD_KDOT_NAME",
                "ON_ROAD_KDOT_TYPE", "ON_ROAD_KDOT_SFX_DIR",
                "AT_ROAD_KDOT_NAME", "AT_ROAD_KDOT_TYPE",
                "AT_ROAD_KDOT_SFX_DIR", "ON_AT_ROAD_KDOT_INTERSECT",
                "ACCIDENT_KEY"
            ]

        else:
            AddMessage('Checking LEO Fields.')

            accidentCursorFields = [
                oid_fieldname, "COUNTY_NBR", "ON_ROAD_NAME", "ON_ROAD_TYPE",
                "ON_ROAD_SUFFIX_DIRECTION", "AT_ROAD_NAME", "AT_ROAD_TYPE",
                "AT_ROAD_SUFFIX_DIRECTION", "ON_AT_ROAD_INTERSECT",
                "ACCIDENT_KEY"
            ]

        onRoadName = ""
        atRoadName = ""

        ## Should make a new table, not use the same one and update it.

        # Create a new search cursor to read in the data from
        # the uniqueRoadNames table.

        # Create a new search cursor to get road names.
        sCursor = SearchCursor(uniqueRoadNamesTable,
                               uniqueRoadNamesTableFields)

        # The table used for this cursor should come from
        # the CreateUniqueRoadNameTable function included
        # in this script.

        # If the base roads feature layer is updated, or otherwise changes
        # the uniqueRoadNamesTable will need to be run again.

        for sRow in sCursor:
            # Place the row data (the road name) into a list.
            #print "roadNamesListPart = " + str(sRow)
            roadNamesListPart = list(sRow)

            # Append the current county number and the current road name to the county number/road name list.
            roadNamesList.append(roadNamesListPart)

        try:
            del sCursor
        except:
            pass

        ####################################################
        # This script will now begin updates based on      #
        # eight patterns. The patterns are checked against #
        # data strings in the target accident data layer.  #
        # If it find matches, it attempts to make          #
        # updates and/or corrections to the data. If there #
        # is a problem with this script, please uncomment  #
        # the print statements to better watch the         #
        # program flow.                                    #
        ####################################################

        # Create the regex patterns to use in the next part,
        # with the update cursor.

        firstLetterMatchString = re.compile(r'C\\+\s+|C\\+|C/+\s+|C/+',
                                            re.IGNORECASE)
        firstNumberMatchString = re.compile(r'^C+[0-9]|^C+\s+[0-9]',
                                            re.IGNORECASE)
        secondMatchString = re.compile(r'^County Road [0-9]+/[ensw]',
                                       re.IGNORECASE)
        thirdMatchString = re.compile(r'[0-9]+[rnts][dht][a-z][a-z]',
                                      re.IGNORECASE)
        fourthMatchString = re.compile(r'[0-9]+[rnts][dht]/[ensw]',
                                       re.IGNORECASE)
        # Just a placeholder, the actual fifthMatchString pattern is generated
        # based on data retrieved within the accident table search cursor.
        fifthMatchString = re.compile(r'^WEST', re.IGNORECASE)
        # Just a placeholder, the actual sixthMatchString pattern is generated
        # based on data retrieved within the accident table search cursor.
        sixthMatchString = re.compile(r'^EAST', re.IGNORECASE)
        seventhMatchString = re.compile(r'^[0-9]+\s+[t][h]', re.IGNORECASE)
        eighthMatchString = re.compile(r'^[0-9]+[t][h][s]', re.IGNORECASE)
        atMatch = None
        orMatch = None

        accListDict = dict()

        # Create a new update cursor for the input feature class.

        # Use row[2], row.OR_TYPE, row[4]
        # or row[5], row[6], row[7]
        # to see if the names are already populating
        # the correct type and suffix fields for the
        # roads.

        # Replace all of these uCursors with an sCursor
        # then use a uCursor to update it
        # or use an iCursor to add them all back
        # into the table after truncating it.

        sCursor = SearchCursor(accidentData, accidentCursorFields)

        for sRow in sCursor:
            #print sRow[0]
            accListDict[sRow[0]] = list(sRow)

        try:
            del sCursor
        except:
            pass

        for accListItem in accListDict.values():
            #print str(roadListKey)

            # Perform updates here
            # Check the name of the roads and correct them.

            # Need to replace C\Q and C27 or (C 27) to County Road Q and County Road 27, respectively.
            # Erroneously encoded with a '\' rather than a '/' between the C and other
            # road name characters.

            if (accListItem[2] != None):
                orMatch = firstLetterMatchString.match(
                    accListItem[2])  # re.compile(r'C\\', re.IGNORECASE)
                if (orMatch != None):
                    ##AddMessage("Will replace the C\ in this OR_NAME2: " + str(accListItem[2]))
                    ##AddMessage(orMatch.end())
                    ##AddMessage("New OR_NAME2: COUNTY ROAD" + str(accListItem[2][orMatch.end():]))
                    accListItem[2] = "COUNTY ROAD " + accListItem[2][orMatch.end():]
                else:
                    pass
            else:
                pass

            if (accListItem[5] != None):
                atMatch = firstLetterMatchString.match(
                    accListItem[5])  # re.compile(r'C\\', re.IGNORECASE)
                if (atMatch != None):
                    ##AddMessage("Will replace the C\ in this AT_NAME2: " + str(accListItem[5]))
                    ##AddMessage(atMatch.end())
                    ##AddMessage("New AT_NAME2: COUNTY ROAD" + str(accListItem[5][atMatch.end():]))
                    accListItem[5] = "COUNTY ROAD " + accListItem[5][atMatch.end():]
                else:
                    pass
            else:
                pass

            accListDict[accListItem[0]] = accListItem

        for accListItem in accListDict.values():

            if (accListItem[2] != None):
                orMatch = firstNumberMatchString.match(
                    accListItem[2]
                )  # re.compile(r'^C[0-9]|^C+\s[0-9]', re.IGNORECASE)
                if (orMatch != None):
                    ##AddMessage("Will replace the C[0-9] or C+\s[0-9] in this OR_NAME2: " + str(accListItem[2]))
                    ##AddMessage(orMatch.end())
                    ##AddMessage("New OR_NAME2: COUNTY ROAD" + str(accListItem[2][orMatch.end():]))
                    accListItem[2] = "COUNTY ROAD " + accListItem[2][
                        orMatch.end() - 1:]
                else:
                    pass
            else:
                pass

            if (accListItem[5] != None):
                atMatch = firstNumberMatchString.match(
                    accListItem[5]
                )  # re.compile(r'^C[0-9]|^C+\s[0-9]', re.IGNORECASE)
                if (atMatch != None):
                    ##AddMessage("Will replace the C[0-9] or C+\s[0-9] in this AT_NAME2: " + str(accListItem[5]))
                    ##AddMessage(atMatch.end())
                    ##AddMessage("New AT_NAME2: COUNTY ROAD" + str(accListItem[5][atMatch.end():]))
                    accListItem[5] = "COUNTY ROAD " + accListItem[5][
                        atMatch.end() - 1:]
                else:
                    pass
            else:
                pass

            accListDict[accListItem[0]] = accListItem

            #print "After county name fix:"
            #print "accListDict[accListItem[0]]'s ON_ROAD_NAME & AT_ROAD_NAME  = " + str(accListDict[accListItem[0]][2]) + " & " + str(accListDict[accListItem[0]][5])

        AddMessage("####################################################")
        AddMessage("# End of First String Matching                     #")
        AddMessage("####################################################")

        for accListItem in accListDict.values():

            # Need to remove slashes, and if they have a
            # trailing directional make sure that it is
            # in the proper field.

            # Pattern matches one or more numbers, then
            # a forward slash, then a directional letter.
            if (accListItem[2] != None):
                orMatch = secondMatchString.match(
                    accListItem[2]
                )  # re.compile(r'^County Road [0-9]+/[ensw]', re.IGNORECASE)
                if (orMatch != None):
                    #print "Need to remove the slash and trailing directional from this OR_NAME2: ", accListItem[2]
                    #print "Match ended at: ", orMatch.end()
                    #print orMatch.group(0)[0:orMatch.end()-2] # The County Road without the slash and trailing directional -- Place this back in OR_NAME2
                    #print orMatch.group(0)[-2:-1] # The slash
                    #print orMatch.group(0)[-1:] # The trailing directional -- Check to see if this is the same as OR_SFX, if not, update OR_SFX
                    accListItem[2] = orMatch.group(0)[0:orMatch.end() - 2]

                    if (accListItem[4] != orMatch.group(0)[-1:]):
                        #print "OR_SFX does not match the trailing directional in OR_NAME2"
                        accListItem[4] = orMatch.group(0)[-1:]
                    else:
                        pass
                else:
                    pass
            else:
                pass

            if (accListItem[5] != None):
                atMatch = secondMatchString.match(
                    accListItem[5]
                )  # re.compile(r'^County Road [0-9]+/[ensw]', re.IGNORECASE)
                if (atMatch != None):
                    #print "Need to remove the slash and trailing directional from this AT_NAME2: ", accListItem[5]
                    #print "Match ended at: ", atMatch.end()
                    #print atMatch.group(0)[0:atMatch.end()-2] # The County Road without the slash and trailing directional -- Place this back in AT_NAME2
                    #print atMatch.group(0)[-2:-1] # The slash
                    #print atMatch.group(0)[-1:] # The trailing directional -- Check to see if this is the same as AT_SFX, if not, update AT_SFX
                    accListItem[5] = atMatch.group(0)[0:atMatch.end() - 2]
                    if (accListItem[7] != atMatch.group(0)[-1:]):
                        #print "AT_SFX does not match the trailing directional in AT_NAME2"
                        accListItem[7] = atMatch.group(0)[-1:]
                    else:
                        pass
                else:
                    pass
            else:
                pass

            accListDict[accListItem[0]] = accListItem

        AddMessage("####################################################")
        AddMessage("# End of Second String Matching                    #")
        AddMessage("####################################################")

        #print "At the end of third string matching, this is how the road names look:"

        atMatch = None
        orMatch = None

        for accListItem in accListDict.values():

            # Need to separate 2NDST, 14THST and similar ones.

            if (accListItem[2] != None):
                orMatch = thirdMatchString.match(
                    accListItem[2]
                )  # thirdMatchString = re.compile(r'[0-9]+[nts][dht][a-z][a-z]', re.IGNORECASE)
                if (orMatch != None):
                    #print "Need to change this from #NDST/#STST/#THST, to #ND/#ST/#TH and have ST in the OR_TYPE field: ", accListItem[2]
                    #print orMatch.end()
                    #print accListItem[2][0:orMatch.end()-2]
                    #print accListItem[2][-2:]
                    accListItem[2] = accListItem[2][0:orMatch.end() - 2]
                    if (accListItem[3] != orMatch.group(0)[-2:]):
                        #print "OR_TYPE does not match the TYPE erroneously concatenated in OR_NAME2"
                        #print "New OR_TYPE should be: ", accListItem[2][-2:]
                        accListItem[3] = orMatch.group(0)[-2:]
                    else:
                        pass
                else:
                    pass
            else:
                pass

            if (accListItem[5] != None):
                atMatch = thirdMatchString.match(
                    accListItem[5]
                )  # thirdMatchString = re.compile(r'[0-9]+[nts][dht][a-z][a-z]', re.IGNORECASE)
                if (atMatch != None):
                    #print "Need to change this from #NDST/#STST/#THST, to #ND/#ST/#TH and have ST in the AT_TYPE field: ", accListItem[5]
                    #print atMatch.end()
                    #print accListItem[5][0:atMatch.end()-2]
                    #print accListItem[5][-2:]
                    accListItem[5] = accListItem[5][0:atMatch.end() - 2]
                    if (accListItem[6] != atMatch.group(0)[-2:]):
                        #print "AT_TYPE does not match the TYPE erroneously concatenated in AT_NAME2"
                        #print "New AT_TYPE should be: ", accListItem[5][-2:]
                        accListItem[6] = atMatch.group(0)[-2:]
                    else:
                        pass
                else:
                    pass
            else:
                pass

            accListDict[accListItem[0]] = accListItem

            #print "ON_ROAD_NAME & AT_ROAD_NAME  = " + str(accListDict[accListItem[0]][2]) + " & " + str(accListDict[accListItem[0]][5])

        AddMessage("####################################################")
        AddMessage("# End of Third String Matching                     #")
        AddMessage("####################################################")

        atMatch = None
        orMatch = None

        for accListItem in accListDict.values():

            # Need to remove /S from 2ND/S, and similar.
            # Check to see if the trailing directional is in the proper field.
            # If not, update the field to be correct.

            if (accListItem[2] != None):
                orMatch = fourthMatchString.match(
                    accListItem[2]
                )  # fourthMatchString = re.compile(r'[0-9]+[nts][dht]/[ensw]', re.IGNORECASE)
                if (orMatch != None):
                    #print "Need to remove the slash and trailing directional from this OR_NAME2: ", accListItem[2]
                    #print "Match ended at: ", orMatch.end()
                    #print orMatch.group(0)[0:orMatch.end()-2] # The Street Name without the slash and trailing directional -- Place this back in OR_NAME2
                    #print orMatch.group(0)[-2:-1] # The slash
                    #print orMatch.group(0)[-1:] # The trailing directional -- Check to see if this is the same as OR_SFX, if not, update OR_SFX
                    accListItem[2] = orMatch.group(0)[0:orMatch.end() - 2]
                    if (accListItem[4] != orMatch.group(0)[-1:]):
                        #print "OR_SFX does not match the trailing directional in OR_NAME2"
                        accListItem[4] = orMatch.group(0)[-1:]
                    else:
                        pass
                else:
                    pass
            else:
                pass

            if (accListItem[5] != None):
                atMatch = fourthMatchString.match(
                    accListItem[5]
                )  # fourthMatchString = re.compile(r'[0-9]+[nts][dht]/[ensw]', re.IGNORECASE)
                if (atMatch != None):
                    #print "Need to remove the slash and trailing directional from this AT_NAME2: ", accListItem[5]
                    #print "Match ended at: ", atMatch.end()
                    #print atMatch.group(0)[0:atMatch.end()-2] # The Street Name without the slash and trailing directional -- Place this back in AT_NAME2
                    #print atMatch.group(0)[-2:-1] # The slash
                    #print atMatch.group(0)[-1:] # The trailing directional -- Check to see if this is the same as AT_SFX, if not, update AT_SFX
                    accListItem[5] = atMatch.group(0)[0:atMatch.end() - 2]
                    if (accListItem[7] != atMatch.group(0)[-1:]):
                        #print "AT_SFX does not match the trailing directional in AT_NAME2"
                        accListItem[7] = atMatch.group(0)[-1:]
                    else:
                        pass
                else:
                    pass
            else:
                pass
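            # Illustrative example (hypothetical value): an AT_NAME2 of "2ND/S"
            # would become AT_NAME2 "2ND" with AT_SFX "S".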

            accListDict[accListItem[0]] = accListItem

        AddMessage("####################################################")
        AddMessage("# End of Fourth String Matching                    #")
        AddMessage("####################################################")

        ### Fifth and Sixth String matching are more complex and
        ### will take more time to rebuild.

        ### But, I can probably remove some of the complexity
        ### by making sure that I'm only focused on one county
        ### at a time (Thus removing county checks)
        ### and by making sure that the years are selected properly.

        atMatch = None
        orMatch = None

        for accListItem in accListDict.values():

            # If there are problems, try moving orMatch reinitialization here.
            # This loop updates the on road name (ON_ROAD_NAME) for accident
            # data from 2010 or earlier, before the maximum field length for
            # road names was increased from 6 characters.

            if (accListItem[2] != None and
                (len(accListItem[2]) == 5 or len(accListItem[2]) == 6)):
                try:
                    accYear = accListItem[9][
                        0:4]  # Get the first 4 characters of the Accident_Key
                    accYear = int(accYear)  # Turn them into an integer.
                except:
                    accYear = 2000  # If there was a problem, assume the accident was from 2000.

                if (accYear <= 2010):  ## Replaced the previous check: accYear must be 2010 or earlier.

                    # The next line creates a regex pattern using the current row's ON_ROAD_NAME field
                    # as the pattern, ignoring case.

                    fifthMatchString = re.compile(
                        r'{}'.format(re.escape(accListItem[2])), re.IGNORECASE)
                    #print "This data about", accListItem[2], "is from", int(accListItem.YEAR)
                    roadMatchCounter = 0
                    for roadNamesItem in roadNamesList:
                        noSpacesRoadName = str(roadNamesItem[0]).replace(
                            ' ', '')
                        orMatch = fifthMatchString.match(noSpacesRoadName)
                        if (orMatch != None):
                            ##AddMessage( "Found a match for " + str(accListItem[2]) + "and " + str(roadNamesItem[0]) + ".")
                            roadMatchCounter += 1
                        else:
                            pass
                    # If there was only one match between the accident road name for that county and the
                    # unique road names for that county, replace the accident road name with the
                    # unique road name. -- Does another loop through the roadList to accomplish
                    # this. Probably not the most efficient way to do this, but it works.
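                    # Illustrative example (hypothetical values): a truncated
                    # ON_ROAD_NAME of "UNIONC" would be expanded to "UNION CHAPEL"
                    # when that is the only county road whose space-stripped
                    # name starts with "UNIONC".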
                    if roadMatchCounter == 1:
                        #print "Exactly one road matched in this county. Road Matches: ", roadMatchCounter
                        for roadNamesItem in roadNamesList:
                            # Make sure the roadNamesItem's name is at least 6 characters long
                            # and longer than the accident's truncated road name.
                            if len(roadNamesItem[0]) > 5 and len(
                                    roadNamesItem[0]) > len(accListItem[2]):
                                noSpacesRoadName = str(
                                    roadNamesItem[0]).replace(' ', '')
                                orMatch = fifthMatchString.match(
                                    noSpacesRoadName)
                                if (orMatch != None):
                                    AddMessage("Old on road name was: " +
                                               str(accListItem[2]))
                                    AddMessage(
                                        "New on road name will be corrected to: "
                                        + str(roadNamesItem[0]).upper())
                                    accListItem[2] = str(
                                        roadNamesItem[0]).upper()
                                else:
                                    pass
                            else:
                                pass

                    elif roadMatchCounter > 1:
                        AddMessage(
                            str(roadMatchCounter) +
                            " unique road names in this county matched the record's road name of: "
                            + str(accListItem[2]))
                        AddMessage(
                            "The record's road name will not be expanded.")
                        pass
                    else:
                        pass
                else:
                    pass
            else:
                pass

            accListDict[accListItem[0]] = accListItem

        AddMessage("####################################################")
        AddMessage("# End of Fifth String Matching                     #")
        AddMessage("####################################################")

        atMatch = None
        orMatch = None

        for accListItem in accListDict.values():

            # If there are problems, try moving atMatch reinitialization here.
            # This loop updates the at road name (AT_ROAD_NAME) for accident
            # data from 2010 or earlier, before the maximum field length for
            # road names was increased from 6 characters.

            if (accListItem[5] != None and
                (len(accListItem[5]) == 5 or len(accListItem[5]) == 6)):
                try:
                    accYear = accListItem[9][
                        0:4]  # Get the first 4 characters of the Accident_Key
                    accYear = int(accYear)  # Turn them into an integer.
                except:
                    accYear = 2000  # If there was a problem, assume the accident was from 2000.

                if (accYear <= 2010):  ## Replaced the previous check: accYear must be 2010 or earlier.

                    # The next line creates a regex pattern using the current row's AT_ROAD_NAME field
                    # as the pattern, ignoring case.

                    sixthMatchString = re.compile(
                        r'{}'.format(re.escape(accListItem[5])), re.IGNORECASE)
                    #print "This data about", accListItem[5], "is from", int(accListItem.YEAR)
                    roadMatchCounter = 0
                    for roadNamesItem in roadNamesList:
                        # Removes all the spaces from the roadName, allowing
                        # for matching of UNIONC to UNION CHAPEL, LONETR to LONE TREE,
                        # TRICIT to TRI CITY, etc.
                        noSpacesRoadName = str(roadNamesItem[0]).replace(
                            ' ', '')
                        atMatch = sixthMatchString.match(noSpacesRoadName)
                        if (atMatch != None):
                            ##AddMessage("Found a match for " + str(accListItem[5]) +  " and "  + str(roadNamesItem[0]) + ".")
                            roadMatchCounter += 1
                        else:
                            pass
                    # If there was only one match between the accident road name for that county and the
                    # unique road names for that county, replace the accident road name with the
                    # unique road name. -- Does another loop through the roadList to accomplish
                    # this. Probably not the most efficient way to do this, but it works.
                    if roadMatchCounter == 1:
                        #print "Exactly one road matched in this county. Road Matches: ", roadMatchCounter
                        for roadNamesItem in roadNamesList:
                            # Make sure the roadNamesItem's name is at least 6 characters long
                            # and longer than the accident's truncated road name.
                            if len(roadNamesItem[0]) > 5 and len(
                                    roadNamesItem[0]) > len(accListItem[5]):
                                noSpacesRoadName = str(
                                    roadNamesItem[0]).replace(' ', '')
                                atMatch = sixthMatchString.match(
                                    noSpacesRoadName)
                                if (atMatch != None):
                                    AddMessage("Old at road name was: " +
                                               str(accListItem[5]))
                                    AddMessage(
                                        "New at road name will be corrected to: "
                                        + str(roadNamesItem[0]).upper())
                                    accListItem[5] = str(
                                        roadNamesItem[0]).upper()
                                else:
                                    pass
                            else:
                                pass

                    elif roadMatchCounter > 1:
                        AddMessage(
                            str(roadMatchCounter) +
                            " unique road names in this county matched the record's road name of: "
                            + str(accListItem[5]))
                        AddMessage(
                            "The record's road name will not be expanded.")
                        pass
                    else:
                        pass
                else:
                    pass
            else:
                pass

            accListDict[accListItem[0]] = accListItem

        AddMessage("####################################################")
        AddMessage("# End of Sixth String Matching                     #")
        AddMessage("####################################################")

        for accListItem in accListDict.values():

            # Remove the extra space in roads with names like "5 TH" and "17 TH".
            # Also remove single and double quotes.

            if (accListItem[2] != None):
                accListItem[2] = accListItem[2].replace(
                    "'", "")  # Remove Single Quotes
                accListItem[2] = accListItem[2].replace(
                    '"', '')  # Remove Double Quotes
                orMatch = seventhMatchString.match(
                    accListItem[2]
                )  # re.compile(r'^[0-9]+\s[t][h]', re.IGNORECASE)
                if (orMatch != None):
                    AddMessage(str(accListItem[2]) + " will be changed to:")
                    accListItem[2] = orMatch.group(0)[0:orMatch.end(
                    ) - 3] + orMatch.group(0)[orMatch.end() - 2:orMatch.end()]
                    AddMessage(str(accListItem[2]))
                else:
                    pass
            else:
                pass

            if (accListItem[5] != None):
                accListItem[5] = accListItem[5].replace(
                    "'", "")  # Remove Single Quotes
                accListItem[5] = accListItem[5].replace(
                    '"', '')  # Remove Double Quotes
                atMatch = seventhMatchString.match(
                    accListItem[5]
                )  # re.compile(r'^[0-9]+\s[t][h]', re.IGNORECASE)
                if (atMatch != None):
                    AddMessage(str(accListItem[5]) + " will be changed to:")
                    accListItem[5] = atMatch.group(0)[0:atMatch.end(
                    ) - 3] + atMatch.group(0)[atMatch.end() - 2:atMatch.end()]
                    AddMessage(str(accListItem[5]))
                else:
                    pass
            else:
                pass
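            # Illustrative example (hypothetical value): "17 TH" becomes "17TH"
            # after any single or double quotes have been stripped.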

            accListDict[accListItem[0]] = accListItem

        AddMessage("####################################################")
        AddMessage("# End of Seventh String Matching                   #")
        AddMessage("####################################################")

        for accListItem in accListDict.values():

            # Remove the trailing "S" from ordinal road names like "4THS" and "17THS".

            if (accListItem[2] != None):
                orMatch = eighthMatchString.match(
                    accListItem[2]
                )  # re.compile(r'^[0-9]+[t][h][s]', re.IGNORECASE)
                if (orMatch != None):
                    AddMessage(str(accListItem[2]) + " will be changed to:")
                    accListItem[2] = orMatch.group(0)[0:orMatch.end() - 1]
                    AddMessage(str(accListItem[2]))
                else:
                    pass
            else:
                pass

            if (accListItem[5] != None):
                atMatch = eighthMatchString.match(
                    accListItem[5]
                )  # re.compile(r'^[0-9]+[t][h][s]', re.IGNORECASE)
                if (atMatch != None):
                    AddMessage(str(accListItem[5]) + " will be changed to:")
                    accListItem[5] = atMatch.group(0)[0:atMatch.end() - 1]
                    AddMessage(str(accListItem[5]))
                else:
                    pass
            else:
                pass
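            # Illustrative example (hypothetical value): "17THS" becomes "17TH".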

            accListDict[accListItem[0]] = accListItem

        AddMessage("####################################################")
        AddMessage("# End of Eighth String Matching                    #")
        AddMessage("####################################################")

        AddMessage("####################################################")
        AddMessage("# Rebuilding Intersection Names                    #")
        AddMessage("####################################################")

        for accListItem in accListDict.values():

            # Rebuild the intersection names in the form of:
            # onRoadName + " | " + atRoadName

            onRoadName = ""
            atRoadName = ""

            if accListItem[2] != None:
                onRoadName = str(accListItem[2])
            else:
                pass

            if accListItem[5] != None:
                atRoadName = str(accListItem[5])
            else:
                pass

            # onRoadName and atRoadName default to empty strings above, so the
            # rebuilt intersection name can always be written back.
            accListItem[8] = onRoadName + " | " + atRoadName
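            # Illustrative example (hypothetical values): an on road name of
            # "MAIN" and an at road name of "1ST" would be rebuilt as "MAIN | 1ST".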

            accListDict[accListItem[0]] = accListItem

        AddMessage("####################################################")
        AddMessage("# Intersection Names Rebuilt                       #")
        AddMessage("####################################################")

        ### Don't forget to add accident_key to the list of sCursor
        ### fields. Need it to properly select which accidents
        ### need their road names un-truncated.

        AddMessage("####################################################")
        AddMessage("# Applying Changes with an Update Cursor           #")
        AddMessage("####################################################")

        uCursor = UpdateCursor(accidentData, accidentCursorFields)

        for uCursorRow in uCursor:
            try:
                accListItem = accListDict[uCursorRow[0]]
                uCursor.updateRow(accListItem)
            except Exception as updateError:
                AddMessage("An error occurred while updating: " + str(updateError))
        try:
            del uCursor
        except:
            pass

        AddMessage("####################################################")
        AddMessage("# Update completed.                                #")
        AddMessage("####################################################")

    except Exception as newException:
        AddMessage(str(newException))
        del newException

    finally:
        try:
            del sCursor
        except:
            pass
        try:
            del uCursor
        except:
            pass
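

# Standalone, illustrative sketch (not called by the tool above): it rebuilds the
# third, fourth, seventh, and eighth match strings from the inline comments in the
# function above -- the pattern definitions here are assumptions based on those
# comments -- and shows, with hypothetical road name values, what each correction does.
def _matchStringExamples():
    import re

    thirdMatchString = re.compile(r'[0-9]+[nts][dht][a-z][a-z]', re.IGNORECASE)
    fourthMatchString = re.compile(r'[0-9]+[nts][dht]/[ensw]', re.IGNORECASE)
    seventhMatchString = re.compile(r'^[0-9]+\s[t][h]', re.IGNORECASE)
    eighthMatchString = re.compile(r'^[0-9]+[t][h][s]', re.IGNORECASE)

    # Third: move the concatenated type into its own field.
    m = thirdMatchString.match('2NDST')
    print('2NDST -> name: ' + '2NDST'[0:m.end() - 2] + ', type: ' + m.group(0)[-2:])  # 2ND, ST

    # Fourth: drop the slash and move the trailing directional into the suffix field.
    m = fourthMatchString.match('2ND/S')
    print('2ND/S -> name: ' + m.group(0)[0:m.end() - 2] + ', suffix: ' + m.group(0)[-1:])  # 2ND, S

    # Seventh: remove the stray space in ordinal names.
    m = seventhMatchString.match('17 TH')
    print('17 TH -> ' + m.group(0)[0:m.end() - 3] + m.group(0)[m.end() - 2:m.end()])  # 17TH

    # Eighth: remove the trailing S from ordinal names.
    m = eighthMatchString.match('17THS')
    print('17THS -> ' + m.group(0)[0:m.end() - 1])  # 17TH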