예제 #1
0
    def setupRowCollection():
        """Create and return a RowDataCollection configured for loading.

        Columns 1-8 are string columns, 9-11 are float columns (3 dps);
        column 0 is formatted without the leading comma the others carry.
        A trailing 'row_no' int column tracks the source row in the file.
        """
        collection = RowDataCollection()

        # Column 0 gets its own format string: no leading comma.
        collection.initCollection(
            do.StringData(0, 0, format_str='{0}', default=''))

        # Remaining columns: 0 marks a string column, 1 marks a float column.
        column_types = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1]
        for col, col_type in enumerate(column_types, 1):
            if col_type:
                collection.initCollection(
                    do.FloatData(col,
                                 col,
                                 format_str=', {0}',
                                 no_of_dps=3,
                                 default=0.00))
            else:
                collection.initCollection(
                    do.StringData(col, col, format_str=', {0}', default=''))

        # Extra column used to track the row number in the source file.
        collection.initCollection(do.IntData(15, 'row_no'))

        return collection
예제 #2
0
class SpillUnit(AIsisUnit):
    """Concrete implementation of AIsisUnit storing Isis Spill Unit data.

    Contains a reference to a rowdatacollection for storing and
    accessing all the row data. i.e. the geometry data for the section,
    containing the chainage, elevation, etc values.
    Methods for accessing the data in these objects and adding removing rows
    are available.

    See Also:
        AIsisUnit
    """

    # Name constants for keys used in the values dictionary.
    CHAINAGE = 'chainage'
    ELEVATION = 'elevation'
    EASTING = 'easting'
    NORTHING = 'northing'

    UNIT_TYPE = 'Spill'
    CATEGORY = 'Spill'
    FILE_KEY = 'SPILL'

    def __init__(self, file_order):
        """Constructor.

        Args:
            file_order (int): The location of this unit in the file.
        """
        AIsisUnit.__init__(self, file_order)

        # Fill in the header values. These contain the data at the top of the
        # section, such as the unit name and labels.
        self.head_data = {'section_label': '', 'spill_ds': '', 'coeff': 0,
                          'modular_limit': 0, 'comment': '', 'rowcount': 0}

        self.unit_type = SpillUnit.UNIT_TYPE
        self.unit_category = SpillUnit.CATEGORY
        self.has_datarows = True
        self.unit_length = 0

    def readUnitData(self, unit_data, file_line):
        """Reads the unit data into the geometry objects.

        Args:
            unit_data (list): The part of the isis dat file pertaining to
                this section.
            file_line (int): The line in unit_data to start reading from.

        Returns:
            int: the last line of this unit in the file.

        See Also:
            AIsisUnit - readUnitData()
        """
        file_line = self._readHeadData(unit_data, file_line)
        self.name = self.head_data['section_label']
        file_line = self._readRowData(unit_data, file_line)
        self.head_data['rowcount'] = self.row_collection.getNumberOfRows()
        # Step back one line so the caller continues at the right place.
        return file_line - 1

    def _readHeadData(self, unit_data, file_line):
        """Reads the data in the file header section into the class.

        Args:
            unit_data (list): contains data for this unit.
            file_line (int): the line to start reading the header from.

        Returns:
            int: the first line after the header section.
        """
        # Fields are fixed-width as defined by the .dat file layout.
        self.head_data['comment'] = unit_data[file_line][5:].strip()
        self.name = self.head_data['section_label'] = unit_data[file_line + 1][:12].strip()
        self.head_data['spill_ds'] = unit_data[file_line + 1][12:24].strip()
        self.head_data['coeff'] = unit_data[file_line + 2][:10].strip()
        self.head_data['modular_limit'] = unit_data[file_line + 2][10:20].strip()
        self.unit_length = int(unit_data[file_line + 3].strip())
        return file_line + 4

    def _readRowData(self, unit_data, file_line):
        """Reads the units rows into the row collection.

        This is all the geometry data that occurs after the no of rows variable in
        the Spill Units of the dat file.

        Args:
            unit_data (list): the data pertaining to this unit.
            file_line (int): the first line of row data.

        Returns:
            int: the first line after the row data.
        """
        # Add the new row data types to the object collection.
        # All of them must have type, output format, default value and position
        # in the row as the first variables in vars.
        # The others are DataType specific.
        self.row_collection = RowDataCollection()
        self.row_collection.initCollection(do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3))
        self.row_collection.initCollection(do.FloatData(1, rdt.ELEVATION, format_str='{:>10}', no_of_dps=3))
        self.row_collection.initCollection(do.FloatData(2, rdt.EASTING, format_str='{:>10}', no_of_dps=2, default=0.0))
        self.row_collection.initCollection(do.FloatData(3, rdt.NORTHING, format_str='{:>10}', no_of_dps=2, default=0.0))

        out_line = file_line + self.unit_length
        try:
            # Load the geometry data.
            for i in range(file_line, out_line):

                # Put the values into the respective data objects.
                # This is done based on the column widths set in the Dat file
                # for the spill section.
                self.row_collection.addValue(rdt.CHAINAGE, unit_data[i][0:10].strip())
                self.row_collection.addValue(rdt.ELEVATION, unit_data[i][10:20].strip())

                # In some edge cases there are no values set in the file for the
                # easting and northing, so use defaults.
                if not len(unit_data[i]) > 21:
                    self.row_collection.addValue(rdt.EASTING)
                    self.row_collection.addValue(rdt.NORTHING)
                else:
                    self.row_collection.addValue(rdt.EASTING, unit_data[i][20:30].strip())
                    self.row_collection.addValue(rdt.NORTHING, unit_data[i][30:40].strip())

        except NotImplementedError:
            # Fixed: logging.Logger has no ERROR() method; it is error().
            logger.error('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')
            raise

        return out_line

    def getData(self):
        """Retrieve the data in this unit.

        The String[] returned is formatted for printing in the fashion
        of the .dat file.

        Returns:
            list of output data formatted the same as in the .DAT file.
        """
        out_data = self._getHeadData()
        out_data.extend(self._getRowData())

        return out_data

    def _getRowData(self):
        """Get the data in the row collection.

        For all the rows in the spill geometry section get the data from
        the rowdatacollection class.

        Returns:
            list containing the formatted unit rows.
        """
        out_data = []
        for i in range(0, self.row_collection.getNumberOfRows()):
            out_data.append(self.row_collection.getPrintableRow(i))

        return out_data

    def _getHeadData(self):
        """Get the header data formatted for printing out.

        Returns:
            list - containing the formatted head data.
        """
        out_data = []
        self.head_data['rowcount'] = self.unit_length
        out_data.append('SPILL ' + self.head_data['comment'])

        # Get the row with the section name and spill info from the formatter.
        out_data.append('{:<12}'.format(self.head_data['section_label']) +
                        '{:<12}'.format(self.head_data['spill_ds'])
                        )

        out_data.append('{:>10}'.format(self.head_data['coeff']) +
                        '{:>10}'.format(self.head_data['modular_limit'])
                        )
        out_data.append('{:>10}'.format(self.head_data['rowcount']))

        return out_data

    def addDataRow(self, chainage, elevation, index=None, easting=0.00,
                   northing=0.00):
        """Adds a new row to the spill unit.

        Ensures that certain requirements of the data rows, such as the
        chainage needing to increase for each row down, are met, then calls
        the addNewRow() method in the row_collection.

        Args:
            chainage (float): chainage value. Must not be less than the
                previous chainage in the collection.
            elevation (float): elevation in datum.
            index (int): stating the position to insert the new row - Optional.
                If no value is given it will be appended to the end of the
                data_object.
            easting (float): Optional - defaults to 0.00.
            northing (float): Optional - defaults to 0.00.

        Raises:
            IndexError: If the index does not exist.
            ValueError: If the given value is not accepted by the DataObjects.

        See Also:
            ADataObject and subclasses for information on the parameters.
        """
        # Only bounds-check an explicit index; None means append. (Comparing
        # None with an int would raise a TypeError under Python 3.)
        if index is not None:
            # If it's greater than the record length then raise an index error.
            if index > self.row_collection.getNumberOfRows():
                raise IndexError('Given index out of bounds of row_collection')
            # If it's the same as the record length then set index to None so
            # the row is appended instead of inserted.
            if index == self.row_collection.getNumberOfRows():
                index = None

        # Check that there won't be a negative change in chainage across rows.
        if not self._checkChainageIncreaseNotNegative(index, chainage):
            raise ValueError('Chainage increase cannot be negative')

        # Call the row collection add row method to add the new row.
        self.row_collection.addNewRow(values_dict={'chainage': chainage,
                'elevation': elevation, 'easting': easting,
                'northing': northing}, index=index)

    def _checkChainageIncreaseNotNegative(self, index, chainageValue):
        """Checks that the new chainage value is not higher than the next one.

        If the given chainage value for the given index is higher than the
        value in the following row ISIS will give a negative chainage error.

        It will return True if the value is the last in the row.

        Args:
            index (int): The index that the value is to be added at.
            chainageValue (float): The chainage value to be added.

        Returns:
            False if greater or True if less.
        """
        if index is None:
            return True

        if index != 0:
            # Must not be less than or equal to the previous chainage.
            if self.row_collection.getDataValue('chainage', index - 1) >= chainageValue:
                return False

        # Must not be greater than or equal to the chainage it displaces.
        if self.row_collection.getDataValue('chainage', index) <= chainageValue:
            return False

        return True
        
        
예제 #3
0
def readTmfFile(datafile):
    """Loads the contents of the Materials TMF file referenced by datafile.

    Loads the data from the file referenced by the given TuflowFile object into
    a :class:'rowdatacollection' and a list of comment only lines.

    Args:
        datafile(TuflowFile): TuflowFile object with file details.

    Return:
        tuple: rowdatacollection, comment_lines(list).

    See Also:
        :class:'rowdatacollection'.
    """
    value_separator = ','
    comment_types = ['#', '!']
    tmf_enum = dataobj.TmfEnum()

    path = datafile.getAbsolutePath()
    value_order = range(11)

    # Column 0 is an int with no leading comma; columns 1-10 are floats.
    row_collection = RowDataCollection()
    row_collection.initCollection(do.IntData(0, 0, format_str=None,
                                             default=''))
    for i in range(1, 11):
        row_collection.initCollection(
            do.FloatData(i, i, format_str=', {0}', default='', no_of_dps=3))

    # Keep track of any comment lines and the row numbers as well.
    row_collection.initCollection(
        do.StringData(11, 'comment', format_str=' ! {0}', default=''))
    row_collection.initCollection(
        do.IntData(12, 'row_no', format_str=None, default=''))

    logger.info('Loading data file contents from disc - %s' % (path))
    contents = _loadFileFromDisc(path)

    # Stores the comments found in the file.
    comment_lines = []

    # Loop through the contents list loaded from file line-by-line.
    row_count = 0
    for line in contents:

        # hasCommentOnlyLine returns '' for blank lines, so test both.
        comment = hasCommentOnlyLine(line, comment_types)
        if comment or comment == '':
            comment_lines.append(comment)

        # If we have a line that isn't a comment or a blank then it is going
        # to contain materials entries.
        else:
            comment_lines.append(None)
            row_collection = _loadRowData(line, row_count, row_collection,
                                          tmf_enum.ITERABLE, comment_types,
                                          value_separator)
            row_count += 1

    # Just need to reset the has_changed variable because it will have been
    # set to True while loading everything in.
    for col in value_order:
        row_collection.getDataObject(col).has_changed = False

    return row_collection, comment_lines
예제 #4
0
def readMatSubfile(main_datafile, filename,
                   header_list):  #path, root, header1, header2):
    """Load a Materials subfile referenced from a Materials CSV file.

    Scans the subfile to work out which columns hold usable data, then loads
    the rows into a rowdatacollection and records any comment-only lines.

    Args:
        main_datafile: object with a root attribute used to resolve the
            subfile location.
        filename(str): subfile name, joined onto main_datafile.root.
        header_list(list): up to two column header names to locate within
            the subfile's header row.

    Return:
        DataFileSubfileMat: containing the loaded row data, the comment
        lines and the locations of the two requested headers.

    Raises:
        IOError: if the file at path cannot be loaded.
    """
    value_separator = ','
    comment_types = ['#', '!']
    # NOTE(review): mat_subfile_enum is not referenced again in this
    # function - confirm whether it is still needed.
    mat_subfile_enum = dataobj.SubfileMatEnum()
    path = os.path.join(main_datafile.root, filename)
    root = main_datafile.root

    # Fall back to the string 'None' (not the None object) because these are
    # compared against stripped string entries in _loadHeadData below.
    header1 = 'None'
    header2 = 'None'
    if len(header_list) > 0:
        header1 = header_list[0]
        if len(header_list) > 1:
            header2 = header_list[1]

    def _scanfile(filepath):
        """Scans the file before we do any loading to identify the contents.
        Need to do this because the file can be setup in so many way that it
        becomes a headache to work it out in advance. Better to take a little
        bit of extra processing time and do some quick checks first.
         
        Arguments:
            file_path (str): the path to the subfile.
        
        Return:
            tuple:
                 list: booleans with whether the column contains
                       data that we want or not.
                 int:  length of the cols list.
                 list: containing all of the first row column data
                 int:  first row with usable data on.
        """
        logger.debug('Scanning Materials file - %s' % (filepath))

        # NOTE(review): opening in binary mode for csv.reader is a Python 2
        # idiom; Python 3 needs mode 'r' with newline='' - confirm the
        # target interpreter version.
        with open(filepath, 'rb') as csv_file:

            csv_file = csv.reader(csv_file)

            cols = []
            head_list = []
            start_row = -1
            for i, row in enumerate(csv_file, 0):
                # Stop scanning at the first completely blank row.
                if "".join(row).strip() == "":
                    break

                for j, col in enumerate(row, 0):
                    if i == 0:
                        # First row: treat as headers; mark every column as
                        # not-data until a numeric value is seen in it.
                        cols.append(False)
                        head_list = row
                    elif uuf.isNumeric(col):
                        cols[j] = True
                        if start_row == -1:
                            start_row = i
                    elif cols[j] == True:
                        break

        return cols, len(cols), head_list, start_row

    def _loadHeadData(row, row_collection, col_length):
        """Load the header row entries into the row collection.

        Stores every header entry under 'actual_header' and records the
        column index of header1 and header2 (-1 when not found).

        Args:
            row(list): the header row values.
            row_collection(rowdatacollection): for updating.
            col_length(int): number of columns reported by _scanfile.

        Return:
            tuple: row_collection, head1_location(int), head2_location(int).
        """
        # NOTE(review): new_row and the findSubstringInList results are not
        # used in this function - confirm whether they can be removed.
        new_row = [None] * 12

        comment_indices, length = uuf.findSubstringInList('!', row)
        comment_lines.append(None)

        head1_location = -1
        head2_location = -1
        row_length = len(row)
        for i in range(0, col_length):
            if i < row_length:
                entry = row[i].strip()
                if entry == header1:
                    head1_location = i
                if entry == header2:
                    head2_location = i
                row_collection.addValue('actual_header', entry)

        return row_collection, head1_location, head2_location

    def _loadRowData(row, row_count, row_collection, comment_lines, col_length,
                     start_row):
        """Loads the data in a specific row of the file.
        
        Args:
            row(list): containing the row data.
            row_count(int): the current row number.
            row_collection(rowdatacollection): for updating.
            comment_lines(list): comment lines found so far.
            col_length(int): number of columns reported by _scanfile.
            start_row(int): first row with usable data on.

        Return:
            tuple: row_collection, comment_lines - both updated.
        """
        # Any lines that aren't headers, but are above the first row to contain
        # actual data will be stored as comment lines
        if row_count < start_row:
            comment_lines.append(row)
            return row_collection, comment_lines
        else:
            comment_lines.append(None)

        # A trailing comment on a data row is kept in the 'comment' column.
        if '!' in row[-1] or '#' in row[-1]:
            row_collection.addValue('comment', row[-1])

        # Add the row data in the order that it appears in the file
        # from left to right.
        for i in range(col_length):
            if i < len(row):
                row_collection.addValue(i, row[i])

        return row_collection, comment_lines

    try:
        logger.info('Loading data file contents from disc - %s' % (path))
        # NOTE(review): binary mode with csv.reader is a Python 2 idiom -
        # see the note in _scanfile above.
        with open(path, 'rb') as csv_file:
            csv_file = csv.reader(csv_file)

            # Do a quick check of the file setup
            cols, col_length, head_list, start_row = _scanfile(path)

            # First entry doesn't want to have a comma in front when formatting.
            # but all of the others do.
            row_collection = RowDataCollection()
            row_collection.initCollection(
                do.FloatData(0, 0, format_str=' {0}', default='', no_of_dps=6))
            for i in range(1, len(cols)):
                if cols[i] == True:
                    row_collection.initCollection(
                        do.FloatData(i,
                                     i,
                                     format_str=', {0}',
                                     default='',
                                     no_of_dps=6))
                else:
                    row_collection.initCollection(
                        do.StringData(i, i, format_str=', {0}', default=''))

            # Extra collections to track the file-defined header text and
            # the source row number.
            row_collection.initCollection(
                do.StringData(0, 'actual_header', format_str='{0}',
                              default=''))
            row_collection.initCollection(
                do.IntData(15, 'row_no', format_str=None, default=''))

            # Stores the comments found in the file
            comment_lines = []
            first_data_line = False
            # Loop through the contents list loaded from file line-by-line.
            for i, line in enumerate(csv_file, 0):

                # hasCommentOnlyLine returns '' for blank lines, so test both.
                comment = hasCommentOnlyLine(''.join(line), comment_types)
                if comment or comment == '':
                    comment_lines.append([comment, i])

                # If we have a line that isn't a comment or a blank then it is going
                # to contain materials entries.
                else:
                    # First non-comment is the headers
                    if first_data_line == False:
                        first_data_line = True
                        row_collection, head1_loc, head2_loc = _loadHeadData(
                            line, row_collection, col_length)
                    else:
                        row_collection, comment_lines = _loadRowData(
                            line, i, row_collection, comment_lines, col_length,
                            start_row)

                    row_collection.addValue('row_no', i)

    except IOError:
        logger.warning('Cannot load file - IOError')
        raise IOError('Cannot load file at: ' + path)

    path_holder = filetools.PathHolder(path, root)
    mat_sub = dataobj.DataFileSubfileMat(path_holder, row_collection,
                                         comment_lines, path_holder.file_name,
                                         head1_loc, head2_loc)
    return mat_sub
예제 #5
0
def readMatCsvFile(datafile):
    """Loads the contents of the Materials CSV file referenced by datafile.
    
    Loads the data from the file referenced by the given TuflowFile object into
    a :class:'rowdatacollection' and a list of comment only lines.
    
    Args:
        datafile(TuflowFile): TuflowFile object with file details.
        
    Return:
        tuple: rowdatacollection, comment_lines(list), subfile_details(dict).
        
    See Also:
        :class:'rowdatacollection'.
    """
    # NOTE(review): value_seperator is not referenced again in this function.
    value_seperator = ','
    comment_types = ['#', '!']
    csv_enum = dataobj.MatCsvEnum()
    # Maps subfile names to the column headers requested from them; filled
    # in by _disectEntry and returned to the caller.
    subfile_details = {}

    def _loadHeadData(row, row_collection):
        """Load the header row into the row collection.

        Spreads the four CSV header entries across the expanded 12-column
        layout and stores them under 'actual_header'.

        Args:
            row(list): the header row values.
            row_collection(rowdatacollection): for updating.

        Return:
            rowdatacollection: updated with header row details.
        """
        new_row = [None] * 12

        # A trailing comment on the header row is kept in 'comment'.
        if '!' in row[-1] or '#' in row[-1]:
            row_collection.addValue('comment', row[-1])

        # Map the four file headers onto the expanded column layout.
        new_row[0] = row[0]
        new_row[1] = row[1]
        new_row[9] = row[2]
        new_row[11] = row[3]

        # NOTE(review): row_length == len(new_row), so the guard inside this
        # loop is always true.
        row_length = len(new_row)
        for i, v in enumerate(new_row):
            if i < row_length:
                row_collection.addValue('actual_header', new_row[i])

        return row_collection

    def _disectEntry(col_no, entry, new_row):
        """Breaks the row values into the appropriate object values.
        
        The materials file can have Excel style sub-values. i.e. it can have
        seperate columns defined within a bigger one. This function will break
        those values down into a format usable by the values initiated in the
        rowdatacollection.
        
        Args:
            col_no(int): the current column number.
            entry(string): the value of the current column.
            new_row(list): the row values to update.
            
        Return:
            list containing the updated row values.
        
        Note:
            This isn't very nice. Need to clean it up and find a better, safer
            way of dealing with breaking the row data up. It may be excess work
            but perhaps creating an xml converter could work quite will and
            make dealing with the file a bit easier?
        """
        # NOTE(review): made_change is never read after being set.
        made_change = False

        # Put in ID and Hazard as normal
        if col_no == 0:
            new_row[0] = entry
        elif col_no == 11:
            new_row[11] = entry
        # Possible break up Manning's entry further
        elif col_no == 1:
            # See if there's more than one value in the Manning's category.
            splitval = entry.split(',')

            # If there is and it's numeric then it's a single value for 'n'
            if len(splitval) == 1:
                if uuf.isNumeric(splitval[0]):
                    new_row[1] = splitval[0]

                # Otherwise it's a filename. These can be further separated
                # into two column headers to read from the sub files.
                else:
                    strsplit = splitval[0].split('|')
                    if len(strsplit) == 1:
                        subfile_details[strsplit[0].strip()] = []
                        new_row[6] = strsplit[0].strip()
                    elif len(strsplit) == 2:
                        subfile_details[strsplit[0]] = [strsplit[1].strip()]
                        new_row[6] = strsplit[0].strip()
                        new_row[7] = strsplit[1].strip()
                    else:
                        subfile_details[strsplit[0]] = [
                            strsplit[1].strip(), strsplit[2].strip()
                        ]
                        new_row[6] = strsplit[0].strip()
                        new_row[7] = strsplit[1].strip()
                        new_row[8] = strsplit[2].strip()

            # If there's more than one value then it must be the Manning's
            # depth curve values (N1, Y1, N2, Y2).
            else:
                new_row[2] = splitval[0]
                new_row[3] = splitval[1]
                new_row[4] = splitval[2]
                new_row[5] = splitval[3]

        # Finally grab the infiltration parameters (IL, CL)
        elif col_no == 2:
            splitval = entry.split(',')
            new_row[9] = splitval[0]
            new_row[10] = splitval[1]

        return new_row

    def _loadRowData(row, row_count, row_collection):
        """Loads the data in a specific row of the file.
        
        Args:
            row(list): containing the row data.
            row_count(int): the current row number.
            row_collection(rowdatacollection): for updating.

        Return:
            None: row_collection is updated in place.
        """
        # A trailing comment on a data row is kept in the 'comment' column.
        if '!' in row[-1] or '#' in row[-1]:
            row_collection.addValue('comment', row[-1])
        new_row = [None] * 12

        # Add the row data in the order that it appears in the file
        # from left to right.
        for i in csv_enum.ITERABLE:
            if i < len(row):
                new_row = _disectEntry(i, row[i], new_row)

        for val, item in enumerate(new_row):
            row_collection.addValue(val, item)

    # First entry doesn't want to have a comma in front when formatting.
    row_collection = RowDataCollection()
    # 0 marks a string column, 1 marks a float column (3 dps).
    types = [1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0]

    # Do the first entry separately because it has a different format string
    row_collection.initCollection(
        do.StringData(0, 0, format_str='{0}', default=''))
    for i, t in enumerate(types, 1):
        if t == 0:
            row_collection.initCollection(
                do.StringData(i, i, format_str=', {0}', default=''))
        else:
            row_collection.initCollection(
                do.FloatData(i, i, format_str=', {0}', default='',
                             no_of_dps=3))

    # Add a couple of extra rows to the row_collection for tracking the
    # data in the file.
    row_collection.initCollection(
        do.StringData(12, 'comment', format_str='{0}', default=''))
    row_collection.initCollection(
        do.StringData(13, 'actual_header', format_str='{0}', default=''))
    row_collection.initCollection(
        do.IntData(15, 'row_no', format_str=None, default=''))

    path = datafile.getAbsolutePath()
    try:
        logger.info('Loading data file contents from disc - %s' % (path))
        # NOTE(review): binary mode with csv.reader is a Python 2 idiom;
        # Python 3 needs mode 'r' with newline='' - confirm target version.
        with open(path, 'rb') as csv_file:
            csv_file = csv.reader(csv_file)

            # Stores the comments found in the file
            comment_lines = []
            first_data_line = False
            line_count = 0

            try:
                # Loop through the contents list loaded from file line-by-line.
                for i, line in enumerate(csv_file, 0):

                    # hasCommentOnlyLine returns '' for blanks, so test both.
                    comment = hasCommentOnlyLine(''.join(line), comment_types)
                    if comment or comment == '':
                        comment_lines.append(comment)

                    # If we have a line that isn't a comment or a blank then it is going
                    # to contain materials entries.
                    else:
                        # First non-comment is the headers
                        if first_data_line == False:
                            first_data_line = True
                            _loadHeadData(line, row_collection)
                        else:
                            _loadRowData(line, i, row_collection)

                        row_collection.addValue('row_no', line_count)
                        line_count += 1
                        comment_lines.append(None)
            except IndexError:
                logger.error(
                    'This file is not setup/formatted correctly for a Materials.CSV file:\n'
                    + path)
                raise IndexError(
                    'File is not correctly formatted for a Materials.csv file')
            except AttributeError:
                logger.error(
                    'This file is not setup/formatted correctly for a Materials.CSV file:\n'
                    + path)
                raise AttributeError(
                    'File is not correctly formatted for a Materials.csv file')

    except IOError:
        logger.warning('Cannot load file - IOError')
        raise IOError('Cannot load file at: ' + path)

    # Just need to reset the has_changed variable because it will have been
    # set to True while loading everything in.
    for i in range(0, len(csv_enum.ITERABLE)):
        row_collection.getDataObject(i).has_changed = False

    return row_collection, comment_lines, subfile_details
예제 #6
0
def readBcFile(datafile):
    """Loads the contents of the BC Database file referenced by datafile.

    Loads the data from the file referenced by the given TuflowFile object into
    a :class:'rowdatacollection' and a list of comment only lines.

    Args:
        datafile(TuflowFile): TuflowFile object with file details.

    Return:
        tuple: rowdatacollection, comment_lines(list).

    See Also:
        :class:'rowdatacollection'.
    """
    comment_types = ['#', '!']
    bc_enum = dataobj.BcEnum()

    def _checkHeaders(row, required_headers):
        """Checks that any required headers can be found.

        Reviews the headers in the header row of the csv file to ensure that
        any specifically needed named column headers exist.

        Args:
            row(list): columns headers.
            required_headers(list): column names that must be included.

        Return:
            bool: True if all required headers were found, False otherwise.
        """
        # Collect every missing header so each one is reported, rather than
        # always warning about whichever header happened to be checked last.
        missing = [r for r in required_headers if r not in row]
        for header in missing:
            # Fixed: message previously rendered as "notfound" (no space).
            logger.warning('Required header (' + header + ') not ' +
                           'found in file: ' + path)
        return not missing

    def _loadHeadData(row, row_collection, required_headers):
        """Loads the column header data.

        Adds the file defined names for the headers to the rowdatacollection.

        Args:
            row(list): containing the row data.
            row_collection(rowdatacollection): for updating.
            required_headers(list): column names that must exist.

        Return:
            rowdatacollection: updated with header row details.
        """
        row_length = len(row)
        # Missing required headers are logged; loading continues regardless.
        _checkHeaders(row, required_headers)
        for i, _ in enumerate(bc_enum.ITERABLE):
            if i < row_length:
                row_collection.addValue('actual_header', row[i])

        return row_collection

    def _loadRowData(row, row_count, row_collection):
        """Loads the data in a specific row of the file.

        Args:
            row(list): containing the row data.
            row_count(int): the current row number.
            row_collection(rowdatacollection): for updating.

        Return:
            rowdatacollection: updated with the row details.
        """
        # A trailing comment on a data row is kept in the 'comment' column.
        if '!' in row[-1] or '#' in row[-1]:
            row_collection.addValue('comment', row[-1])

        # Add the row data in the order that it appears in the file
        # from left to right.
        for i in bc_enum.ITERABLE:
            if i < len(row):
                row_collection.addValue(i, row[i])

        return row_collection

    # Initialise the RowDataObjectCollection with the correct setup. Only
    # the first column is formatted without a leading comma.
    row_collection = RowDataCollection()
    for i, val in enumerate(bc_enum.ITERABLE):
        if i == 0:
            row_collection.initCollection(
                do.StringData(i, i, format_str='{0}', default=''))
        else:
            row_collection.initCollection(
                do.StringData(i, i, format_str=', {0}', default=''))

    # Extra collections to track the file-defined header text and the
    # source row number.
    row_collection.initCollection(
        do.StringData(0, 'actual_header', format_str=', {0}', default=''))
    row_collection.initCollection(
        do.IntData(15, 'row_no', format_str=None, default=''))

    path = datafile.getAbsolutePath()
    required_headers = ['Name', 'Source']
    try:
        logger.info('Loading data file contents from disc - %s' % (path))
        with open(path, 'rb') as csv_file:
            csv_file = csv.reader(csv_file)

            # Stores the comments found in the file
            comment_lines = []
            first_data_line = False
            row_count = 0
            # Loop through the contents list loaded from file line-by-line.
            for i, line in enumerate(csv_file, 0):

                # hasCommentOnlyLine returns '' for blank lines, so test both.
                comment = hasCommentOnlyLine(''.join(line), comment_types)
                if comment or comment == '':
                    comment_lines.append(comment)

                # If we have a line that isn't a comment or a blank then it is
                # going to contain bc database entries.
                else:
                    # First non-comment is the headers
                    if not first_data_line:
                        first_data_line = True
                        row_collection = _loadHeadData(line, row_collection,
                                                       required_headers)
                    else:
                        row_collection = _loadRowData(line, i, row_collection)
                        row_collection.addValue('row_no', row_count)
                        row_count += 1

                    comment_lines.append(None)

    except IOError:
        logger.warning('Cannot load file - IOError')
        raise IOError('Cannot load file at: ' + path)

    # Just need to reset the has_changed variable because it will have been
    # set to True while loading everything in.
    for i in range(0, len(bc_enum.ITERABLE)):
        row_collection.getDataObject(i).has_changed = False

    return row_collection, comment_lines
예제 #7
0
class BridgeUnit(AIsisUnit):
    """Subclass of AIsisUnit storing Isis Bridge Unit data.

    Note:
        This is really an abstract class for any bridge unit and is not
        intended to be used directly: subclasses must implement
        _readHeadData() and _getHeadData().

    Contains a reference to a rowdatacollection for storing and
    accessing all the row data. i.e. the geometry data for the section,
    containing the chainage, elevation, roughness, etc values.
    Methods for accessing the data in these objects and adding removing rows
    are available.
    """

    UNIT_TYPE = 'Bridge'
    CATEGORY = 'Bridge'

    # Placeholder for subclass specific unit variables.
    UNIT_VARS = None

    def __init__(self, file_order):
        """Constructor.

        Args:
            file_order (int): The location of this unit in the file.
        """
        AIsisUnit.__init__(self, file_order)

        # Fill in the header values these contain the data at the top of the
        # section, such as the unit name and labels.
        self.head_data = {
            'upstream': '',
            'downstream': '',
            'remote_us': '',
            'remote_ds': '',
            'roughness_type': 'MANNING',
            'calibration_coef': 1,
            'skew_angle': 1,
            'width': 0,
            'dual_distance': 0,
            'no_of_orifices': 0,
            'orifice_flag': '',
            'op_lower': 0,
            'op_upper': 0,
            'op_cd': 0,
            'comment': '',
            'rowcount': 0,
            'row_count_additional': {
                'Opening': 1
            }
        }

        self.unit_type = BridgeUnit.UNIT_TYPE
        self.unit_category = BridgeUnit.CATEGORY
        self.has_datarows = True
        self.no_of_collections = 2
        self.unit_length = 0
        self.no_of_chainage_rows = 1
        self.no_of_opening_rows = 1
        self.no_of_culvert_rows = 0
        # Extra row collections (e.g. 'Opening') keyed by name. Insertion
        # order is kept so they are written back out in the order loaded.
        self.additional_row_collections = OrderedDict()

    def getNumberOfOpenings(self):
        """Get the number of openings in this bridge unit.

        Return:
            int - number of opening rows read from the file.
        """
        return self.no_of_opening_rows

    def getArea(self):
        """Returns the cross sectional area of the bridge openings.

        Note:
            Not implemented yet - currently always returns 0.

        Return:
            Dict - containing the area of the opening(s). keys = 'total', then
                '1', '2', 'n' for all openings found.
        """
        # TODO: calculate the opening areas from the geometry and opening row
        # collections. A half-finished commented-out draft was removed here.
        return 0

    def readUnitData(self, unit_data, file_line):
        """Reads the unit data into the geometry objects.

        See Also:
            AIsisUnit

        Args:
            unit_data (list): The section of the isis dat file pertaining to
                this section.
            file_line (int): The line in unit_data to start reading from.

        Return:
            int - the line after the last one read by this unit.
        """
        file_line = self._readHeadData(unit_data, file_line)
        self.name = self.head_data['upstream']
        file_line = self._readMainRowData(unit_data, file_line)
        file_line = self._readAdditionalRowData(unit_data, file_line)
        self.head_data['rowcount'] = self.row_collection.getNumberOfRows()

        # Record how many rows were loaded into each additional collection so
        # the header can be written back out correctly.
        # (.items() replaces Py2-only .iteritems(); identical behaviour here.)
        for key, data in self.additional_row_collections.items():
            self.head_data['row_count_additional'][key] = data.getNumberOfRows()

        return file_line

    def _readHeadData(self, unit_data, file_line):
        """Format the header data for writing to file.

        Note:
            Must be implemented by subclass

        Raises:
            NotImplementedError: if not overriden by sub class.
        """
        raise NotImplementedError

    def _readMainRowData(self, unit_data, file_line):
        """Reads the units rows into the row collection.

        This is all the geometry data that occurs after the no of rows variable
        in the Bridge Units of the dat file.

        Args:
            unit_data (list): the data pertaining to this unit.
            file_line (int): the line to start reading the geometry rows from.

        Return:
            int - the line after the geometry rows and the opening row count.
        """
        # Add the new row data types to the object collection
        # All of them must have type, output format, default value and position
        # in the row as the first variables in vars.
        # The others are DataType specific.
        self.row_collection = RowDataCollection()
        self.row_collection.initCollection(
            do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3))
        self.row_collection.initCollection(
            do.FloatData(1, rdt.ELEVATION, format_str='{:>10}', no_of_dps=3))
        self.row_collection.initCollection(
            do.FloatData(2,
                         rdt.ROUGHNESS,
                         format_str='{:>10}',
                         no_of_dps=3,
                         default=0.0))
        self.row_collection.initCollection(
            do.ConstantData(3,
                            rdt.EMBANKMENT, ('L', 'R'),
                            format_str='{:>11}',
                            default=''))

        self.unit_length = 6
        out_line = file_line + self.no_of_chainage_rows
        try:
            # Load the geometry data
            for i in range(file_line, out_line):

                # Put the values into the respective data objects
                # This is done based on the column widths set in the Dat file
                # for the river section.
                self.row_collection.addValue(rdt.CHAINAGE,
                                             unit_data[i][0:10].strip())
                self.row_collection.addValue(rdt.ELEVATION,
                                             unit_data[i][10:20].strip())
                self.row_collection.addValue(rdt.ROUGHNESS,
                                             unit_data[i][20:30].strip())
                # Embankment marker might not exist on the line. Narrowed from
                # a bare except: slicing a short str returns '' rather than
                # raising, so only guard against a non-string line entry.
                try:
                    bank = unit_data[i][40:51].strip()
                except (TypeError, AttributeError):
                    bank = ''
                self.row_collection.addValue(rdt.EMBANKMENT, bank)

        except NotImplementedError:
            logger.error(
                'Unable to read Unit Data(dataRowObject creation) - NotImplementedError'
            )
            raise

        # The line straight after the geometry block holds the number of
        # opening rows that follow.
        self.no_of_opening_rows = int(unit_data[out_line].strip())
        self.unit_length += self.no_of_chainage_rows + 1
        return out_line + 1

    def getData(self):
        """Retrieve the data in this unit.

        See Also:
            AIsisUnit - getData()

        Returns:
            String list - output data formated the same as in the .DAT file.
        """
        out_data = self._getHeadData()
        out_data.extend(self._getRowData())
        out_data.extend(self._getAdditionalRowData())

        return out_data

    def _formatDataItem(self,
                        item,
                        col_width,
                        no_of_dps=None,
                        is_head_item=True,
                        align_right=True):
        """Format the given head data item for printing to file.

        Args:
            item: a key into self.head_data when is_head_item is True,
                otherwise the value to format directly.
            col_width (int): column width to pad the value to.
            no_of_dps=None(int): if given, format as a float with this many
                decimal places.
            is_head_item=True(bool): whether item is a head_data key.
            align_right=True(bool): pad on the left if True, else the right.

        Return:
            str - the formatted value.
        """
        if is_head_item:
            item = self.head_data[item]
        if no_of_dps is not None:
            form = '%0.' + str(no_of_dps) + 'f'
            item = form % float(item)

        if align_right:
            final_str = '{:>' + str(col_width) + '}'
        else:
            final_str = '{:<' + str(col_width) + '}'
        return final_str.format(item)

    def _getRowData(self):
        """For all the rows in the river geometry section get the data from
        the rowdatacollection class.

        Returns:
            list - containing the formatted unit rows, preceded by the row
                count formatted to 10 characters.
        """
        out_data = []
        no_of_rows = self.row_collection.getNumberOfRows()
        out_data.append(
            self._formatDataItem(no_of_rows, 10, is_head_item=False))
        for i in range(0, no_of_rows):
            out_data.append(self.row_collection.getPrintableRow(i))

        return out_data

    def _getAdditionalRowData(self):
        """Get the formatted row data for any additional row data objects.

        Returns:
            list - containing additional row data, each collection preceded by
                its row count formatted to 10 characters.
        """
        out_data = []
        # (.values() replaces Py2-only .itervalues(); identical behaviour.)
        for data in self.additional_row_collections.values():
            no_of_rows = data.getNumberOfRows()
            out_data.append(
                self._formatDataItem(no_of_rows, 10, is_head_item=False))
            for i in range(0, no_of_rows):
                out_data.append(data.getPrintableRow(i))

        return out_data

    def _getHeadData(self):
        """Get the header data formatted for printing out

        Note:
            Must be implemented by concrete subclass.

        Raises:
            NotImplementedError: if not overridden by sub class
        """
        raise NotImplementedError

    def updateDataRow(self, row_vals, index, collection_name=None):
        """Updates the row at the given index in the river units row_collection.

        The row will be updated at the given index.

        Args:
            row_vals(Dict): keys must be datunits.ROW_DATA_TYPES with a legal
                value assigned for the DataType. Chainage and Elevation MUST
                be included.
            index: the row to update.
            collection_name=None(str): If None the self.row_collection
                with the bridges geometry data will be updated. If a string it
                will be looked for in the self.additional_row_collections
                dictionary or raise an AttributeError if it doesn't exist.

        Raises:
            KeyError: If collection_name key does not exist.
            IndexError: If the index does not exist.
            ValueError: If the given value is not accepted by the DataObject's.

        See Also:
            ADataObject and subclasses for information on the parameters.
        """
        if collection_name is not None:
            if collection_name not in self.additional_row_collections:
                raise KeyError(
                    'collection_name %s does not exist in row collection' %
                    (collection_name))

        # Call superclass method to add the new row
        AIsisUnit.updateDataRow(self, index=index, row_vals=row_vals)

    def addDataRow(self, row_vals, index=None, collection_name=None):
        """Adds a new row to one of this bridge units row_collection's.

        The new row will be added at the given index. If no index is given it
        will be appended to the end of the collection.

        If no chainage or elevation values are given an AttributeError will be
        raised as they cannot have default values. All other values can be
        ommitted. If they are they will be given defaults.

        Examples:
            >>> import ship.isis.datunits.rdt as rdt
            >>> unit.addDataRow({rdt.CHAINAGE:5.0, rdt.ELEVATION:36.2}, index=4)

        Args:
            row_vals(Dict): keys must be datunits.rdt with a legal
                value assigned for the DataType. Chainage and Elevation MUST
                be included.
            index=None(int): the row to insert into. The existing row at the
                given index will be moved up by one.
            collection_name=None(str): If None the self.row_collection
                with the bridges geometry data will be updated. If a string it
                will be looked for in the self.additional_row_collections
                dictionary or raise an AttributeError if it doesn't exist.

        Raises:
            AttributeError: If CHAINAGE or ELEVATION are not given.
            KeyError: if the collection_name does not exist.
            IndexError: If the index does not exist.
            ValueError: If the given value is not accepted by the DataObject's.

        See Also:
            ADataObject and subclasses for information on the parameters.
        """
        if rdt.CHAINAGE not in row_vals or rdt.ELEVATION not in row_vals:
            logger.error('Required values of CHAINAGE and ELEVATION not given')
            raise AttributeError(
                'Required values of CHAINAGE and ELEVATION not given')

        if collection_name is not None:
            if collection_name not in self.additional_row_collections:
                raise KeyError(
                    'collection_name %s does not exist in row collection' %
                    (collection_name))

        # Setup default values for arguments that aren't given.
        # These defaults match the bridge geometry collection created in
        # _readMainRowData (CHAINAGE, ELEVATION, ROUGHNESS, EMBANKMENT). The
        # previous implementation copied the river unit's defaults
        # (PANEL_MARKER, RPL, etc) which do not exist in this collection, and
        # omitted EMBANKMENT.
        # NOTE(review): when collection_name targets an additional collection
        # (e.g. 'Opening') these geometry defaults may not apply - confirm
        # against AIsisUnit.addDataRow.
        kw = {}
        kw[rdt.CHAINAGE] = row_vals.get(rdt.CHAINAGE)
        kw[rdt.ELEVATION] = row_vals.get(rdt.ELEVATION)
        kw[rdt.ROUGHNESS] = row_vals.get(rdt.ROUGHNESS, 0.039)
        kw[rdt.EMBANKMENT] = row_vals.get(rdt.EMBANKMENT, '')

        # Call superclass method to add the new row
        AIsisUnit.addDataRow(self,
                             index=index,
                             row_vals=kw,
                             collection_name=collection_name)

    def _checkChainageIncreaseNotNegative(self, index, chainageValue):
        """Checks that new chainage value is not not higher than the next one.

        If the given chainage value for the given index is higher than the
        value in the following row ISIS will give a negative chainage error.

        It will return true if the value is the last in the row.

        Args:
            index (int): The index that the value is to be added at.
            chainageValue (float): The chainage value to be added.

        Returns:
           False if greater or True if less.
        """
        if index is None:
            # Appending to the end can never create a negative chainage.
            return True

        if index != 0:
            # Must be strictly greater than the previous row's chainage.
            if self.row_collection.getDataValue(rdt.CHAINAGE,
                                                index - 1) >= chainageValue:
                return False

        # Must be strictly less than the chainage currently at this index.
        if self.row_collection.getDataValue(rdt.CHAINAGE,
                                            index) <= chainageValue:
            return False

        return True
예제 #8
0
class RiverUnit(AIsisUnit):
    """Concrete implementation of AIsisUnit storing Isis River Unit
    data.

    Contains a reference to a rowdatacollection for storing and
    accessing all the row data. i.e. the geometry data for the section,
    containing the chainage, elevation, roughness, etc values.

    Methods for accessing the data in these objects and adding removing rows
    are available.

    See Also:
        AIsisUnit
    """

    UNIT_TYPE = 'River'
    CATEGORY = 'River'
    FILE_KEY = 'RIVER'

    def __init__(self, file_order, reach_number):
        """Constructor.

        Args:
            file_order (int): The location of this unit in the file.
            reach_number (int): The reach ID for this unit.
        """
        AIsisUnit.__init__(self, file_order)

        # Fill in the header values these contain the data at the top of the
        # section, such as the unit name and labels.
        self.head_data = {
            'section_label': '',
            'spill1': '',
            'spill2': '',
            'lateral1': '',
            'lateral2': '',
            'lateral3': '',
            'lateral4': '',
            'distance': 0,
            'slope': '',
            'density': 1000,
            'comment': '',
            'rowcount': 0
        }

        self.unit_type = RiverUnit.UNIT_TYPE
        self.unit_category = RiverUnit.CATEGORY
        self.has_datarows = True
        self.reach_number = reach_number
        self.unit_length = 0

    def readUnitData(self, unit_data, file_line):
        """Reads the unit data into the geometry objects.

        See Also:
            AIsisUnit - readUnitData for more information.

        Args:
            unit_data (list): The section of the isis dat file pertaining
                to this section
            file_line (int): The line in unit_data to start reading from.

        Return:
            int - the last line of the file read by this unit.
        """
        file_line = self._readHeadData(unit_data, file_line)
        file_line = self._readRowData(unit_data, file_line)
        self.head_data['rowcount'] = self.row_collection.getNumberOfRows()
        return file_line - 1

    def _readHeadData(self, unit_data, file_line):
        """Format the header data for writing to file.

        The header spans five lines: comment, 'SECTION' keyword, labels,
        distance/slope/density and the row count. Values are picked out by
        fixed column widths as used in the .dat file.

        Args:
            unit_data (list): containing the data to read.
            file_line (int): the first line of this unit's header.

        Return:
            int - the first line after the header (the start of the rows).
        """
        self.head_data['comment'] = unit_data[file_line][5:].strip()
        self.name = self.head_data['section_label'] = unit_data[
            file_line + 2][:12].strip()
        self.head_data['spill1'] = unit_data[file_line + 2][12:24].strip()
        self.head_data['spill2'] = unit_data[file_line + 2][24:36].strip()
        self.head_data['lateral1'] = unit_data[file_line + 2][36:48].strip()
        self.head_data['lateral2'] = unit_data[file_line + 2][48:60].strip()
        self.head_data['lateral3'] = unit_data[file_line + 2][60:72].strip()
        self.head_data['lateral4'] = unit_data[file_line + 2][72:84].strip()
        self.head_data['distance'] = unit_data[file_line + 3][0:10].strip()
        self.head_data['slope'] = unit_data[file_line + 3][10:30].strip()
        self.head_data['density'] = unit_data[file_line + 3][30:40].strip()
        self.unit_length = int(unit_data[file_line + 4].strip())
        return file_line + 5

    def _readRowData(self, unit_data, file_line):
        """Reads the units rows into the row collection.

        This is all the geometry data that occurs after the no of rows variable in
        the River Units of the dat file.

        Args:
            unit_data (list): the data pertaining to this unit.
            file_line (int): the line to start reading the geometry rows from.

        Return:
            int - the first line after the geometry rows.
        """
        # Add the new row data types to the object collection
        # All of them must have type, output format, default value and position
        # in the row as the first variables in vars.
        # The others are DataType specific.
        self.row_collection = RowDataCollection()
        self.row_collection.initCollection(
            do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3))
        self.row_collection.initCollection(
            do.FloatData(1, rdt.ELEVATION, format_str='{:>10}', no_of_dps=3))
        self.row_collection.initCollection(
            do.FloatData(2,
                         rdt.ROUGHNESS,
                         format_str='{:>10}',
                         default=0.0,
                         no_of_dps=3))
        self.row_collection.initCollection(
            do.SymbolData(3,
                          rdt.PANEL_MARKER,
                          '*',
                          format_str='{:<5}',
                          default=False))
        self.row_collection.initCollection(
            do.FloatData(4,
                         rdt.RPL,
                         format_str='{:>5}',
                         default=1.000,
                         no_of_dps=3))
        self.row_collection.initCollection(
            do.ConstantData(5,
                            rdt.BANKMARKER, ('LEFT', 'RIGHT', 'BED'),
                            format_str='{:<10}',
                            default=''))
        self.row_collection.initCollection(
            do.FloatData(6,
                         rdt.EASTING,
                         format_str='{:>10}',
                         default=0.0,
                         no_of_dps=2))
        self.row_collection.initCollection(
            do.FloatData(7,
                         rdt.NORTHING,
                         format_str='{:>10}',
                         default=0.0,
                         no_of_dps=2))
        self.row_collection.initCollection(
            do.ConstantData(8,
                            rdt.DEACTIVATION, ('LEFT', 'RIGHT'),
                            format_str='{:<10}',
                            default=''))
        # Default == '~' means to ignore formatting and apply '' when value is None
        self.row_collection.initCollection(
            do.StringData(9, rdt.SPECIAL, format_str='{:<10}', default='~'))

        out_line = file_line + self.unit_length
        try:
            # Load the geometry data
            for i in range(file_line, out_line):

                # Put the values into the respective data objects
                # This is done based on the column widths set in the Dat file
                # for the river section.
                self.row_collection.addValue(rdt.CHAINAGE,
                                             unit_data[i][0:10].strip())
                self.row_collection.addValue(rdt.ELEVATION,
                                             unit_data[i][10:20].strip())
                self.row_collection.addValue(rdt.ROUGHNESS,
                                             unit_data[i][20:30].strip())
                self.row_collection.addValue(rdt.PANEL_MARKER,
                                             unit_data[i][30:35].strip())
                self.row_collection.addValue(rdt.RPL,
                                             unit_data[i][35:40].strip())
                self.row_collection.addValue(rdt.BANKMARKER,
                                             unit_data[i][40:50].strip())
                self.row_collection.addValue(rdt.EASTING,
                                             unit_data[i][50:60].strip())
                self.row_collection.addValue(rdt.NORTHING,
                                             unit_data[i][60:70].strip())
                self.row_collection.addValue(rdt.DEACTIVATION,
                                             unit_data[i][70:80].strip())
                self.row_collection.addValue(rdt.SPECIAL,
                                             unit_data[i][80:90].strip())

        except NotImplementedError:
            # Fixed: was logger.ERROR(...), which raises AttributeError
            # because logging.Logger has no ERROR method.
            logger.error(
                'Unable to read Unit Data(dataRowObject creation) - NotImplementedError'
            )
            raise

        return out_line

    def getData(self):
        """Retrieve the data in this unit.

        The String[] returned is formatted for printing in the fashion
        of the .dat file.

        Return:
            List of strings formated for writing to .dat file.
        """
        out_data = self._getHeadData()
        out_data.extend(self._getRowData())

        return out_data

    def _getRowData(self):
        """Returns the row data in this class.

        For all the rows in the river geometry section get the data from
        the rowdatacollection class.

        Returns:
            list = containing the formatted unit rows.
        """
        out_data = []
        for i in range(0, self.row_collection.getNumberOfRows()):
            out_data.append(self.row_collection.getPrintableRow(i))

        return out_data

    def _getHeadData(self):
        """Get the header data formatted for printing out to file.

        Returns:
            List of strings - The formatted header list.
        """
        out_data = []
        self.head_data['rowcount'] = self.unit_length
        out_data.append('RIVER ' + self.head_data['comment'])
        out_data.append('SECTION')

        # Get the row with the section name and spill info from the formatter
        out_data.append(self._getHeadSectionRowFormat())

        out_data.append('{:>10}'.format(self.head_data['distance']) +
                        '{:>20}'.format(self.head_data['slope']) +
                        '{:>10}'.format(self.head_data['density']))
        out_data.append('{:>10}'.format(self.head_data['rowcount']))

        return out_data

    def _getHeadSectionRowFormat(self):
        """Formats the section name and spill file row according to contents.

        This is quite a pedantic method. Essentially if there are spills in the
        line of the file they each get 12 spaces. However if it's just the
        one spill there the whitespace is cut off the end. Isis is pretty
        weird about white space so it's best to get it right.

        Returns:
            string containing row data with whitespace trimmed from the right
                side.
        """
        section_row = '{:<12}'.format(self.head_data['section_label'])
        if self.head_data['spill1'] != '':
            section_row += '{:<12}'.format(self.head_data['spill1'])
        if self.head_data['spill2'] != '':
            section_row += '{:<12}'.format(self.head_data['spill2'])

        section_row = section_row.rstrip()
        return section_row

    def updateDataRow(self, row_vals, index):
        """Updates the row at the given index in the river units row_collection.

        The row will be updated at the given index.

        Args:
            row_vals(Dict): keys must be datunits.ROW_DATA_TYPES with a legal
                value assigned for the DataType. Chainage and Elevation MUST
                be included.
            index: the row to update.

        Raises:
            AttributeError: If CHAINAGE or ELEVATION are not given.
            IndexError: If the index does not exist.
            ValueError: If the given value is not accepted by the DataObject's.

        See Also:
            ADataObject and subclasses for information on the parameters.
        """
        # Call superclass method to add the new row
        AIsisUnit.updateDataRow(self, index=index, row_vals=row_vals)

    def addDataRow(self, row_vals, index=None):
        """Adds a new row to the river units row_collection.

        The new row will be added at the given index. If no index is given it
        will be appended to the end of the collection.

        If no chainage or elevation values are given a AttributeError will be
        raised as they cannot have default values. All other values can be
        ommitted. If they are they will be given defaults.

        Examples:
            >>> import ship.isis.datunits.ROW_DATA_TYPES as rdt
            >>> river_unit.addDataRow({rdt.CHAINAGE:5.0, rdt.ELEVATION:36.2}, index=4)

        Args:
            row_vals(Dict): keys must be datunits.ROW_DATA_TYPES with a legal
                value assigned for the DataType. Chainage and Elevation MUST
                be included.
            index=None(int): the row to insert into. The existing row at the
                given index will be moved up by one.

        Raises:
            AttributeError: If CHAINAGE or ELEVATION are not given.
            IndexError: If the index does not exist.
            ValueError: If the given value is not accepted by the DataObject's.

        See Also:
            ADataObject and subclasses for information on the parameters.
        """
        if rdt.CHAINAGE not in row_vals or rdt.ELEVATION not in row_vals:
            logger.error('Required values of CHAINAGE and ELEVATION not given')
            raise AttributeError(
                'Required values of CHAINAGE and ELEVATION not given')

        # Setup default values for arguments that aren't given. The defaults
        # mirror the collection set up in _readRowData.
        kw = {}
        kw[rdt.CHAINAGE] = row_vals.get(rdt.CHAINAGE)
        kw[rdt.ELEVATION] = row_vals.get(rdt.ELEVATION)
        kw[rdt.ROUGHNESS] = row_vals.get(rdt.ROUGHNESS, 0.039)
        kw[rdt.PANEL_MARKER] = row_vals.get(rdt.PANEL_MARKER, False)
        kw[rdt.RPL] = row_vals.get(rdt.RPL, 1.0)
        kw[rdt.BANKMARKER] = row_vals.get(rdt.BANKMARKER, '')
        kw[rdt.EASTING] = row_vals.get(rdt.EASTING, 0.0)
        kw[rdt.NORTHING] = row_vals.get(rdt.NORTHING, 0.0)
        kw[rdt.DEACTIVATION] = row_vals.get(rdt.DEACTIVATION, '')
        kw[rdt.SPECIAL] = row_vals.get(rdt.SPECIAL, '')

        # Call superclass method to add the new row
        AIsisUnit.addDataRow(self, index=index, row_vals=kw)