def setupRowCollection():
    """Setup the RowDataCollection for loading the data into.
    """
    # First entry doesn't want to have a comma in front when formatting.
    row_collection = RowDataCollection()
    types = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1]

    # Do the first entry separately because it has a different format string
    row_collection.initCollection(do.StringData(0, 0, format_str='{0}', default=''))

    for i, t in enumerate(types, 1):
        if t == 0:
            row_collection.initCollection(
                do.StringData(i, i, format_str=', {0}', default=''))
        else:
            row_collection.initCollection(
                do.FloatData(i, i, format_str=', {0}', no_of_dps=3, default=0.00))

    # Add an extra entry to the row_collection for tracking the
    # data in the file.
    row_collection.initCollection(do.IntData(15, 'row_no'))

    return row_collection
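# Usage sketch (hedged): a minimal example of writing a row back out with the
# collection built above, using the addValue()/getPrintableRow() API seen
# throughout this module. The column index and values are illustrative only.
#
#   row_collection = setupRowCollection()
#   row_collection.addValue(0, 'M001')      # first column takes no leading comma
#   row_collection.addValue(9, 0.035)       # columns 9-11 are FloatData
#   print row_collection.getPrintableRow(0)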
def _readArchRowData(self, unit_data, file_line):
    """Load the data defining the openings in the bridge.

    Args:
        unit_data (list): the data pertaining to this unit.

    TODO:
        Change the name of this function to _readOpeningRowData.
    """
    # Add the new row data types to the object collection
    # All of them must have type, output format, default value and position
    # in the row as the first variables in vars.
    # The others are DataType specific.
    self.additional_row_collections['Opening'] = RowDataCollection()
    self.additional_row_collections['Opening'].initCollection(
        do.FloatData(0, rdt.OPEN_START, format_str='{:>10}', no_of_dps=3))
    self.additional_row_collections['Opening'].initCollection(
        do.FloatData(1, rdt.OPEN_END, format_str='{:>10}', no_of_dps=3))
    self.additional_row_collections['Opening'].initCollection(
        do.FloatData(2, rdt.SPRINGING_LEVEL, format_str='{:>10}', no_of_dps=3, default=0.0))
    self.additional_row_collections['Opening'].initCollection(
        do.FloatData(3, rdt.SOFFIT_LEVEL, format_str='{:>10}', no_of_dps=3, default=0.0))

    out_line = file_line + self.no_of_opening_rows
    try:
        # Load the geometry data
        for i in range(file_line, out_line):
            # Put the values into the respective data objects.
            # This is done based on the column widths set in the Dat file
            # for the bridge opening rows.
            self.additional_row_collections['Opening'].addValue(
                rdt.OPEN_START, unit_data[i][0:10].strip())
            self.additional_row_collections['Opening'].addValue(
                rdt.OPEN_END, unit_data[i][10:20].strip())
            self.additional_row_collections['Opening'].addValue(
                rdt.SPRINGING_LEVEL, unit_data[i][20:30].strip())
            self.additional_row_collections['Opening'].addValue(
                rdt.SOFFIT_LEVEL, unit_data[i][30:40].strip())

    except NotImplementedError:
        logger.error('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')
        raise

    self.no_of_culvert_rows = int(unit_data[out_line].strip())
    self.unit_length += self.no_of_culvert_rows + 1
    return out_line
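# The fixed-width reads above rely on plain string slicing: each value sits in
# a 10-character column. A minimal illustration with made-up values:
#
#   line = '     0.000    10.500     0.000    12.250'
#   line[0:10].strip()    # -> '0.000'   (OPEN_START)
#   line[10:20].strip()   # -> '10.500'  (OPEN_END)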
def _readRowData(self, unit_data, file_line):
    """Reads the units rows into the row collection.

    This is all the geometry data that occurs after the no of rows variable
    in the Spill Units of the dat file.

    Args:
        unit_data: the data pertaining to this unit.
    """
    # Add the new row data types to the object collection
    # All of them must have type, output format, default value and position
    # in the row as the first variables in vars.
    # The others are DataType specific.
    self.row_collection = RowDataCollection()
    self.row_collection.initCollection(
        do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3))
    self.row_collection.initCollection(
        do.FloatData(1, rdt.ELEVATION, format_str='{:>10}', no_of_dps=3))
    self.row_collection.initCollection(
        do.FloatData(2, rdt.EASTING, format_str='{:>10}', no_of_dps=2, default=0.0))
    self.row_collection.initCollection(
        do.FloatData(3, rdt.NORTHING, format_str='{:>10}', no_of_dps=2, default=0.0))

    out_line = file_line + self.unit_length
    try:
        # Load the geometry data
        for i in range(file_line, out_line):
            # Put the values into the respective data objects.
            # This is done based on the column widths set in the Dat file
            # for the spill section.
            self.row_collection.addValue(rdt.CHAINAGE, unit_data[i][0:10].strip())
            self.row_collection.addValue(rdt.ELEVATION, unit_data[i][10:20].strip())

            # In some edge cases there are no values set in the file for the
            # easting and northing, so use defaults.
            if not len(unit_data[i]) > 21:
                self.row_collection.addValue(rdt.EASTING)
                self.row_collection.addValue(rdt.NORTHING)
            else:
                self.row_collection.addValue(rdt.EASTING, unit_data[i][20:30].strip())
                self.row_collection.addValue(rdt.NORTHING, unit_data[i][30:40].strip())

    except NotImplementedError:
        logger.error('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')
        raise

    return out_line
def test_negativeChainageCheck_method(self):
    '''Tests the negative chainage check method.
    @note: The method does not need to check for any index issues because
           that is done in the calling method.
    '''
    # Create RiverUnit object and give it a chainage object with some data
    river = riverunit.RiverUnit(1, 1)
    chainage = do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3)
    chainage.data_collection = self.chainage
    chainage.record_length = 18
    river.row_collection = RowDataCollection()
    river.row_collection._collection.append(chainage)

    # Check that we catch a negative chainage increase - > value to the right
    self.assertFalse(river._checkChainageIncreaseNotNegative(6, 10.42),
                     'Catch negative chainage increase fail (1)')
    # Check that we catch a negative chainage increase - < value to the left
    self.assertFalse(river._checkChainageIncreaseNotNegative(5, 8.4),
                     'Catch negative chainage increase fail (2)')
    # Check that we don't stop a non-negative chainage increase.
    self.assertTrue(river._checkChainageIncreaseNotNegative(7, 10.42),
                    'Let non-negative increase through fail')
    # Check that we can insert at the end.
    self.assertTrue(river._checkChainageIncreaseNotNegative(17, 19.418),
                    'Let non-negative increase at end of list through fail')
class SpillUnit(AIsisUnit):
    """Concrete implementation of AIsisUnit storing Isis Spill Unit data.

    Contains a reference to a rowdatacollection for storing and accessing all
    the row data. i.e. the geometry data for the section, containing the
    chainage, elevation, etc values.

    Methods for accessing the data in these objects and adding removing rows
    are available.

    See Also:
        AIsisUnit
    """

    # Name constants for the values dictionary
    CHAINAGE = 'chainage'
    ELEVATION = 'elevation'
    EASTING = 'easting'
    NORTHING = 'northing'

    UNIT_TYPE = 'Spill'
    CATEGORY = 'Spill'
    FILE_KEY = 'SPILL'

    def __init__(self, file_order):
        """Constructor.

        Args:
            file_order (int): The location of this unit in the file.
        """
        AIsisUnit.__init__(self, file_order)

        # Fill in the header values. These contain the data at the top of the
        # section, such as the unit name and labels.
        self.head_data = {'section_label': '',
                          'spill_ds': '',
                          'coeff': 0,
                          'modular_limit': 0,
                          'comment': '',
                          'rowcount': 0}

        self.unit_type = SpillUnit.UNIT_TYPE
        self.unit_category = SpillUnit.CATEGORY
        self.has_datarows = True
        self.unit_length = 0

    def readUnitData(self, unit_data, file_line):
        """Reads the unit data into the geometry objects.

        Args:
            unit_data (list): The part of the isis dat file pertaining
                to this section

        See Also:
            AIsisUnit - readUnitData()
        """
        file_line = self._readHeadData(unit_data, file_line)
        self.name = self.head_data['section_label']
        file_line = self._readRowData(unit_data, file_line)
        self.head_data['rowcount'] = self.row_collection.getNumberOfRows()
        return file_line - 1

    def _readHeadData(self, unit_data, file_line):
        """Reads the data in the file header section into the class.

        Args:
            unit_data (list): contains data for this unit.
        """
        self.head_data['comment'] = unit_data[file_line][5:].strip()
        self.name = self.head_data['section_label'] = unit_data[file_line + 1][:12].strip()
        self.head_data['spill_ds'] = unit_data[file_line + 1][12:24].strip()
        self.head_data['coeff'] = unit_data[file_line + 2][:10].strip()
        self.head_data['modular_limit'] = unit_data[file_line + 2][10:20].strip()
        self.unit_length = int(unit_data[file_line + 3].strip())
        return file_line + 4

    def _readRowData(self, unit_data, file_line):
        """Reads the units rows into the row collection.

        This is all the geometry data that occurs after the no of rows variable
        in the Spill Units of the dat file.

        Args:
            unit_data: the data pertaining to this unit.
        """
        # Add the new row data types to the object collection
        # All of them must have type, output format, default value and position
        # in the row as the first variables in vars.
        # The others are DataType specific.
        self.row_collection = RowDataCollection()
        self.row_collection.initCollection(
            do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3))
        self.row_collection.initCollection(
            do.FloatData(1, rdt.ELEVATION, format_str='{:>10}', no_of_dps=3))
        self.row_collection.initCollection(
            do.FloatData(2, rdt.EASTING, format_str='{:>10}', no_of_dps=2, default=0.0))
        self.row_collection.initCollection(
            do.FloatData(3, rdt.NORTHING, format_str='{:>10}', no_of_dps=2, default=0.0))

        out_line = file_line + self.unit_length
        try:
            # Load the geometry data
            for i in range(file_line, out_line):
                # Put the values into the respective data objects.
                # This is done based on the column widths set in the Dat file
                # for the spill section.
                self.row_collection.addValue(rdt.CHAINAGE, unit_data[i][0:10].strip())
                self.row_collection.addValue(rdt.ELEVATION, unit_data[i][10:20].strip())

                # In some edge cases there are no values set in the file for the
                # easting and northing, so use defaults.
                if not len(unit_data[i]) > 21:
                    self.row_collection.addValue(rdt.EASTING)
                    self.row_collection.addValue(rdt.NORTHING)
                else:
                    self.row_collection.addValue(rdt.EASTING, unit_data[i][20:30].strip())
                    self.row_collection.addValue(rdt.NORTHING, unit_data[i][30:40].strip())

        except NotImplementedError:
            logger.error('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')
            raise

        return out_line

    def getData(self):
        """Retrieve the data in this unit.

        The String[] returned is formatted for printing in the fashion of the
        .dat file.

        Returns:
            list of output data formatted the same as in the .DAT file.
        """
        out_data = self._getHeadData()
        out_data.extend(self._getRowData())
        return out_data

    def _getRowData(self):
        """Get the data in the row collection.

        For all the rows in the spill geometry section get the data from the
        rowdatacollection class.

        Returns:
            list containing the formatted unit rows.
        """
        out_data = []
        for i in range(0, self.row_collection.getNumberOfRows()):
            out_data.append(self.row_collection.getPrintableRow(i))
        return out_data

    def _getHeadData(self):
        """Get the header data formatted for printing out.

        Returns:
            list - containing the formatted head data.
        """
        out_data = []
        self.head_data['rowcount'] = self.unit_length
        out_data.append('SPILL ' + self.head_data['comment'])

        # Get the row with the section name and spill info from the formatter
        out_data.append('{:<12}'.format(self.head_data['section_label']) +
                        '{:<12}'.format(self.head_data['spill_ds']))
        out_data.append('{:>10}'.format(self.head_data['coeff']) +
                        '{:>10}'.format(self.head_data['modular_limit']))
        out_data.append('{:>10}'.format(self.head_data['rowcount']))

        return out_data

    def addDataRow(self, chainage, elevation, index=None, easting=0.00,
                   northing=0.00):
        """Adds a new row to the spill unit.

        Ensures that certain requirements of the data rows, such as the
        chainage needing to increase for each row down, are met, then calls
        the addNewRow() method in the row_collection.

        Args:
            chainage (float): chainage value. Must not be less than the
                previous chainage in the collection.
            elevation (float): elevation in datum.
            index (int): stating the position to insert the new row - Optional.
                If no value is given it will be appended to the end of the
                data_object.

        The other values are all optional and will be set to defaults if not
        given.

        Returns:
            False if the addNewRow() method is unsuccessful.

        Raises:
            IndexError: If the index does not exist.
            ValueError: If the given value is not accepted by the DataObjects.

        See Also:
            ADataObject and subclasses for information on the parameters.
        """
        # If it's greater than the record length then raise an IndexError
        if index > self.row_collection.getNumberOfRows():
            raise IndexError('Given index out of bounds of row_collection')
        # If it's the same as the record length then we can set index to None
        # type and it will be appended instead of inserted.
        if index == self.row_collection.getNumberOfRows():
            index = None

        # Check that there won't be a negative change in chainage across rows.
        if self._checkChainageIncreaseNotNegative(index, chainage) == False:
            raise ValueError('Chainage increase cannot be negative')

        # Call the row collection add row method to add the new row.
        self.row_collection.addNewRow(values_dict={'chainage': chainage,
                                                   'elevation': elevation,
                                                   'easting': easting,
                                                   'northing': northing},
                                      index=index)

    def _checkChainageIncreaseNotNegative(self, index, chainageValue):
        """Checks that the new chainage value is not higher than the next one.

        If the given chainage value for the given index is higher than the
        value in the following row ISIS will give a negative chainage error.
        It will return True if the value is the last in the row.

        Args:
            index (int): The index that the value is to be added at.
            chainageValue (float): The chainage value to be added.

        Returns:
            False if greater or True if less.
        """
        if index == None:
            return True

        if not index == 0:
            if self.row_collection.getDataValue('chainage', index - 1) >= chainageValue:
                return False

        if self.row_collection.getDataValue('chainage', index) <= chainageValue:
            return False

        return True
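# Usage sketch (hedged, illustrative values): addDataRow() enforces the
# increasing-chainage rule checked by _checkChainageIncreaseNotNegative()
# above. Assumes a unit whose row_collection has been set up (e.g. by
# readUnitData()).
#
#   spill.addDataRow(chainage=2.0, elevation=33.0)            # appended
#   spill.addDataRow(chainage=5.0, elevation=32.5)            # appended
#   spill.addDataRow(chainage=1.0, elevation=33.0, index=1)   # raises ValueError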
def readTmfFile(datafile):
    """Loads the contents of the Materials CSV file referenced by datafile.

    Loads the data from the file referenced by the given TuflowFile object
    into a :class:'rowdatacollection' and a list of comment only lines.

    Args:
        datafile(TuflowFile): TuflowFile object with file details.

    Return:
        tuple: rowdatacollection, comment_lines(list).

    See Also:
        :class:'rowdatacollection'.
    """
    value_separator = ','
    comment_types = ['#', '!']
    tmf_enum = dataobj.TmfEnum()

    path = datafile.getAbsolutePath()
    value_order = range(11)

    row_collection = RowDataCollection()
    row_collection.initCollection(do.IntData(0, 0, format_str=None, default=''))
    for i in range(1, 11):
        row_collection.initCollection(
            do.FloatData(i, i, format_str=', {0}', default='', no_of_dps=3))

    # Keep track of any comment lines and the row numbers as well
    row_collection.initCollection(
        do.StringData(11, 'comment', format_str=' ! {0}', default=''))
    row_collection.initCollection(
        do.IntData(12, 'row_no', format_str=None, default=''))

    contents = []
    logger.info('Loading data file contents from disc - %s' % (path))
    contents = _loadFileFromDisc(path)

    # Stores the comments found in the file
    comment_lines = []

    # Loop through the contents list loaded from file line-by-line.
    first_data_line = False
    row_count = 0
    for i, line in enumerate(contents, 0):
        comment = hasCommentOnlyLine(line, comment_types)
        if comment or comment == '':
            comment_lines.append(comment)
        # If we have a line that isn't a comment or a blank then it is going
        # to contain materials entries.
        else:
            comment_lines.append(None)
            row_collection = _loadRowData(line, row_count, row_collection,
                                          tmf_enum.ITERABLE, comment_types,
                                          value_separator)
            row_count += 1

    # Just need to reset the has_changed variable because it will have been
    # set to True while loading everything in.
    for i in range(0, len(value_order)):
        row_collection.getDataObject(value_order[i]).has_changed = False

    return row_collection, comment_lines
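# Usage sketch (hedged): `tmf_datafile` stands in for a TuflowFile object
# obtained from a loaded model; the calls below use only the API seen in this
# module.
#
#   row_data, comments = readTmfFile(tmf_datafile)
#   for i in range(0, row_data.getNumberOfRows()):
#       print row_data.getPrintableRow(i)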
def readMatSubfile(main_datafile, filename, header_list):
    """
    """
    value_separator = ','
    comment_types = ['#', '!']
    mat_subfile_enum = dataobj.SubfileMatEnum()
    path = os.path.join(main_datafile.root, filename)
    root = main_datafile.root

    header1 = 'None'
    header2 = 'None'
    if len(header_list) > 0:
        header1 = header_list[0]
    if len(header_list) > 1:
        header2 = header_list[1]

    def _scanfile(filepath):
        """Scans the file before we do any loading to identify the contents.

        Need to do this because the file can be setup in so many ways that it
        becomes a headache to work it out in advance. Better to take a little
        bit of extra processing time and do some quick checks first.

        Arguments:
            filepath (str): the path to the subfile.

        Return:
            tuple:
                list: booleans with whether the column contains data that we
                    want or not.
                int: length of the cols list.
                list: containing all of the first row column data.
                int: first row with usable data on.
        """
        logger.debug('Scanning Materials file - %s' % (filepath))

        with open(filepath, 'rb') as csv_file:
            csv_file = csv.reader(csv_file)

            cols = []
            head_list = []
            start_row = -1
            for i, row in enumerate(csv_file, 0):
                if "".join(row).strip() == "":
                    break
                for j, col in enumerate(row, 0):
                    if i == 0:
                        cols.append(False)
                        head_list = row
                    elif uuf.isNumeric(col):
                        cols[j] = True
                        if start_row == -1:
                            start_row = i
                    elif cols[j] == True:
                        break

        return cols, len(cols), head_list, start_row

    def _loadHeadData(row, row_collection, col_length):
        """
        """
        new_row = [None] * 12

        comment_indices, length = uuf.findSubstringInList('!', row)
        comment_lines.append(None)

        head1_location = -1
        head2_location = -1
        row_length = len(row)
        for i in range(0, col_length):
            if i < row_length:
                entry = row[i].strip()
                if entry == header1:
                    head1_location = i
                if entry == header2:
                    head2_location = i
                row_collection.addValue('actual_header', entry)

        return row_collection, head1_location, head2_location

    def _loadRowData(row, row_count, row_collection, comment_lines, col_length,
                     start_row):
        """Loads the data in a specific row of the file.

        Args:
            row(list): containing the row data.
            row_count(int): the current row number.
            row_collection(rowdatacollection): to update.
            comment_lines(list): comment-only lines found so far.
            col_length(int): the number of columns to read.
            start_row(int): the first row containing actual data.

        Return:
            tuple: updated rowdatacollection and comment_lines list.
        """
        # Any lines that aren't headers, but are above the first row to contain
        # actual data will be stored as comment lines
        if row_count < start_row:
            comment_lines.append(row)
            return row_collection, comment_lines
        else:
            comment_lines.append(None)

        if '!' in row[-1] or '#' in row[-1]:
            row_collection.addValue('comment', row[-1])

        # Add the row data in the order that it appears in the file
        # from left to right.
        for i in range(col_length):
            if i < len(row):
                row_collection.addValue(i, row[i])

        return row_collection, comment_lines

    try:
        logger.info('Loading data file contents from disc - %s' % (path))
        with open(path, 'rb') as csv_file:
            csv_file = csv.reader(csv_file)

            # Do a quick check of the file setup
            cols, col_length, head_list, start_row = _scanfile(path)

            # First entry doesn't want to have a comma in front when
            # formatting, but all of the others do.
            row_collection = RowDataCollection()
            row_collection.initCollection(
                do.FloatData(0, 0, format_str=' {0}', default='', no_of_dps=6))
            for i in range(1, len(cols)):
                if cols[i] == True:
                    row_collection.initCollection(
                        do.FloatData(i, i, format_str=', {0}', default='', no_of_dps=6))
                else:
                    row_collection.initCollection(
                        do.StringData(i, i, format_str=', {0}', default=''))

            row_collection.initCollection(
                do.StringData(0, 'actual_header', format_str='{0}', default=''))
            row_collection.initCollection(
                do.IntData(15, 'row_no', format_str=None, default=''))

            # Stores the comments found in the file
            comment_lines = []
            first_data_line = False

            # Loop through the contents list loaded from file line-by-line.
            for i, line in enumerate(csv_file, 0):
                comment = hasCommentOnlyLine(''.join(line), comment_types)
                if comment or comment == '':
                    comment_lines.append([comment, i])
                # If we have a line that isn't a comment or a blank then it is
                # going to contain materials entries.
                else:
                    # First non-comment line is the headers
                    if first_data_line == False:
                        first_data_line = True
                        row_collection, head1_loc, head2_loc = _loadHeadData(
                            line, row_collection, col_length)
                    else:
                        row_collection, comment_lines = _loadRowData(
                            line, i, row_collection, comment_lines,
                            col_length, start_row)

                    row_collection.addValue('row_no', i)

    except IOError:
        logger.warning('Cannot load file - IOError')
        raise IOError('Cannot load file at: ' + path)

    path_holder = filetools.PathHolder(path, root)
    mat_sub = dataobj.DataFileSubfileMat(path_holder, row_collection,
                                         comment_lines, path_holder.file_name,
                                         head1_loc, head2_loc)
    return mat_sub
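# Usage sketch (hedged): readMatSubfile() is normally driven by
# readMatCsvFile() below, which collects the subfile names and header names
# from the Manning's column; a direct call would look something like this
# (the filename and headers are illustrative).
#
#   sub = readMatSubfile(main_datafile, 'depth_n_curves.csv', ['Depth', 'n'])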
def readMatCsvFile(datafile):
    """Loads the contents of the Materials CSV file referenced by datafile.

    Loads the data from the file referenced by the given TuflowFile object
    into a :class:'rowdatacollection' and a list of comment only lines.

    Args:
        datafile(TuflowFile): TuflowFile object with file details.

    Return:
        tuple: rowdatacollection, comment_lines(list).

    See Also:
        :class:'rowdatacollection'.
    """
    value_seperator = ','
    comment_types = ['#', '!']
    csv_enum = dataobj.MatCsvEnum()
    subfile_details = {}

    def _loadHeadData(row, row_collection):
        """
        """
        new_row = [None] * 12

        if '!' in row[-1] or '#' in row[-1]:
            row_collection.addValue('comment', row[-1])

        new_row[0] = row[0]
        new_row[1] = row[1]
        new_row[9] = row[2]
        new_row[11] = row[3]

        row_length = len(new_row)
        for i, v in enumerate(new_row):
            if i < row_length:
                row_collection.addValue('actual_header', new_row[i])

        return row_collection

    def _disectEntry(col_no, entry, new_row):
        """Breaks the row values into the appropriate object values.

        The materials file can have Excel style sub-values. i.e. it can have
        separate columns defined within a bigger one. This function will break
        those values down into a format usable by the values initiated in the
        rowdatacollection.

        Args:
            col_no(int): the current column number.
            entry(string): the value of the current column.
            new_row(list): the row values to update.

        Return:
            list containing the updated row values.

        Note:
            This isn't very nice. Need to clean it up and find a better, safer
            way of dealing with breaking the row data up. It may be excess work
            but perhaps creating an xml converter could work quite well and
            make dealing with the file a bit easier?
        """
        made_change = False

        # Put in ID and Hazard as normal
        if col_no == 0:
            new_row[0] = entry
        elif col_no == 11:
            new_row[11] = entry
        # Possibly break up the Manning's entry further
        elif col_no == 1:
            # See if there's more than one value in the Manning's category.
            splitval = entry.split(',')

            # If there is and it's numeric then it's a single value for 'n'
            if len(splitval) == 1:
                if uuf.isNumeric(splitval[0]):
                    new_row[1] = splitval[0]
                # Otherwise it's a filename. These can be further separated
                # into two column headers to read from the sub files.
                else:
                    strsplit = splitval[0].split('|')
                    if len(strsplit) == 1:
                        subfile_details[strsplit[0].strip()] = []
                        new_row[6] = strsplit[0].strip()
                    elif len(strsplit) == 2:
                        subfile_details[strsplit[0]] = [strsplit[1].strip()]
                        new_row[6] = strsplit[0].strip()
                        new_row[7] = strsplit[1].strip()
                    else:
                        subfile_details[strsplit[0]] = [strsplit[1].strip(),
                                                        strsplit[2].strip()]
                        new_row[6] = strsplit[0].strip()
                        new_row[7] = strsplit[1].strip()
                        new_row[8] = strsplit[2].strip()
            # If there's more than one value then it must be the Manning's
            # depth curve values (N1, Y1, N2, Y2).
            else:
                new_row[2] = splitval[0]
                new_row[3] = splitval[1]
                new_row[4] = splitval[2]
                new_row[5] = splitval[3]
        # Finally grab the infiltration parameters (IL, CL)
        elif col_no == 2:
            splitval = entry.split(',')
            new_row[9] = splitval[0]
            new_row[10] = splitval[1]

        return new_row

    def _loadRowData(row, row_count, row_collection):
        """Loads the data in a specific row of the file.

        Args:
            row(list): containing the row data.
            row_count(int): the current row number.
            row_collection(rowdatacollection): to update.

        Return:
            rowdatacollection: updated with the row details.
        """
        if '!' in row[-1] or '#' in row[-1]:
            row_collection.addValue('comment', row[-1])
        new_row = [None] * 12

        # Add the row data in the order that it appears in the file
        # from left to right.
        for i in csv_enum.ITERABLE:
            if i < len(row):
                new_row = _disectEntry(i, row[i], new_row)

        for val, item in enumerate(new_row):
            row_collection.addValue(val, item)

    # First entry doesn't want to have a comma in front when formatting.
    row_collection = RowDataCollection()
    types = [1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0]

    # Do the first entry separately because it has a different format string
    row_collection.initCollection(do.StringData(0, 0, format_str='{0}', default=''))
    for i, t in enumerate(types, 1):
        if t == 0:
            row_collection.initCollection(
                do.StringData(i, i, format_str=', {0}', default=''))
        else:
            row_collection.initCollection(
                do.FloatData(i, i, format_str=', {0}', default='', no_of_dps=3))

    # Add a few extra entries to the row_collection for tracking the
    # data in the file.
    row_collection.initCollection(
        do.StringData(12, 'comment', format_str='{0}', default=''))
    row_collection.initCollection(
        do.StringData(13, 'actual_header', format_str='{0}', default=''))
    row_collection.initCollection(
        do.IntData(15, 'row_no', format_str=None, default=''))

    path = datafile.getAbsolutePath()
    try:
        logger.info('Loading data file contents from disc - %s' % (path))
        with open(path, 'rb') as csv_file:
            csv_file = csv.reader(csv_file)

            # Stores the comments found in the file
            comment_lines = []
            first_data_line = False
            line_count = 0

            try:
                # Loop through the contents list loaded from file line-by-line.
                for i, line in enumerate(csv_file, 0):
                    comment = hasCommentOnlyLine(''.join(line), comment_types)
                    if comment or comment == '':
                        comment_lines.append(comment)
                    # If we have a line that isn't a comment or a blank then it
                    # is going to contain materials entries.
                    else:
                        # First non-comment line is the headers
                        if first_data_line == False:
                            first_data_line = True
                            _loadHeadData(line, row_collection)
                        else:
                            _loadRowData(line, i, row_collection)
                            row_collection.addValue('row_no', line_count)
                            line_count += 1
                        comment_lines.append(None)

            except IndexError:
                logger.error('This file is not setup/formatted correctly for a Materials.CSV file:\n' + path)
                raise IndexError('File is not correctly formatted for a Materials.csv file')
            except AttributeError:
                logger.error('This file is not setup/formatted correctly for a Materials.CSV file:\n' + path)
                raise AttributeError('File is not correctly formatted for a Materials.csv file')

    except IOError:
        logger.warning('Cannot load file - IOError')
        raise IOError('Cannot load file at: ' + path)

    # Just need to reset the has_changed variable because it will have been
    # set to True while loading everything in.
    for i in range(0, len(csv_enum.ITERABLE)):
        row_collection.getDataObject(i).has_changed = False

    return row_collection, comment_lines, subfile_details
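# Usage sketch (hedged): the third return value maps any subfile names found
# in the Manning's column to the header names to read from them, which is what
# readMatSubfile() above expects.
#
#   row_data, comments, subfiles = readMatCsvFile(materials_datafile)
#   for fname, headers in subfiles.items():
#       sub = readMatSubfile(materials_datafile, fname, headers)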
def readBcFile(datafile):
    """Loads the contents of the BC Database file referenced by datafile.

    Loads the data from the file referenced by the given TuflowFile object
    into a :class:'rowdatacollection' and a list of comment only lines.

    Args:
        datafile(TuflowFile): TuflowFile object with file details.

    Return:
        tuple: rowdatacollection, comment_lines(list).

    See Also:
        :class:'rowdatacollection'.
    """
    value_seperator = ','
    comment_types = ['#', '!']
    bc_enum = dataobj.BcEnum()

    def _checkHeaders(row, required_headers):
        """Checks that any required headers can be found.

        Reviews the headers in the header row of the csv file to ensure that
        any specifically needed named column headers exist.

        Args:
            row(list): columns headers.
            required_headers(list): column names that must be included.

        Return:
            False if any required header is missing, True otherwise.
        """
        # Check what we have in the header row
        head_check = True
        for r in required_headers:
            if not r in row:
                head_check = False
                logger.warning('Required header (' + r + ') not found in file: ' + path)
        return head_check

    def _loadHeadData(row, row_collection, required_headers):
        """Loads the column header data.

        Adds the file defined names for the headers to the rowdatacollection.

        Args:
            row(list): containing the row data.
            row_collection(rowdatacollection): for updating.
            required_headers(list): column names that must exist.

        Return:
            rowdatacollection: updated with header row details.
        """
        row_length = len(row)
        head_check = _checkHeaders(row, required_headers)
        for i, v in enumerate(bc_enum.ITERABLE):
            if i < row_length:
                row_collection.addValue('actual_header', row[i])

        return row_collection

    def _loadRowData(row, row_count, row_collection):
        """Loads the data in a specific row of the file.

        Args:
            row(list): containing the row data.
            row_count(int): the current row number.
            row_collection(rowdatacollection): to update.

        Return:
            rowdatacollection: updated with the row details.
        """
        if '!' in row[-1] or '#' in row[-1]:
            row_collection.addValue('comment', row[-1])

        # Add the row data in the order that it appears in the file
        # from left to right.
        for i in bc_enum.ITERABLE:
            if i < len(row):
                row_collection.addValue(i, row[i])

        return row_collection

    # Initialise the RowDataObjectCollection object with the correct setup
    row_collection = RowDataCollection()
    for i, val in enumerate(bc_enum.ITERABLE):
        if i == 0:
            row_collection.initCollection(
                do.StringData(i, i, format_str='{0}', default=''))
        else:
            row_collection.initCollection(
                do.StringData(i, i, format_str=', {0}', default=''))
    row_collection.initCollection(
        do.StringData(0, 'actual_header', format_str=', {0}', default=''))
    row_collection.initCollection(
        do.IntData(15, 'row_no', format_str=None, default=''))

    path = datafile.getAbsolutePath()
    required_headers = ['Name', 'Source']
    try:
        logger.info('Loading data file contents from disc - %s' % (path))
        with open(path, 'rb') as csv_file:
            csv_file = csv.reader(csv_file)

            # Stores the comments found in the file
            comment_lines = []
            first_data_line = False
            row_count = 0

            # Loop through the contents list loaded from file line-by-line.
            for i, line in enumerate(csv_file, 0):
                comment = hasCommentOnlyLine(''.join(line), comment_types)
                if comment or comment == '':
                    comment_lines.append(comment)
                # If we have a line that isn't a comment or a blank then it is
                # going to contain data entries.
                else:
                    # First non-comment line is the headers
                    if first_data_line == False:
                        first_data_line = True
                        row_collection = _loadHeadData(line, row_collection,
                                                       required_headers)
                    else:
                        row_collection = _loadRowData(line, i, row_collection)
                        row_collection.addValue('row_no', row_count)
                        row_count += 1
                    comment_lines.append(None)

    except IOError:
        logger.warning('Cannot load file - IOError')
        raise IOError('Cannot load file at: ' + path)

    # Just need to reset the has_changed variable because it will have been
    # set to True while loading everything in.
    for i in range(0, len(bc_enum.ITERABLE)):
        row_collection.getDataObject(i).has_changed = False

    return row_collection, comment_lines
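# Usage sketch (hedged): `bc_datafile` stands in for a TuflowFile object for
# the BC Database csv; 'Name' and 'Source' are the required headers checked
# above.
#
#   row_data, comments = readBcFile(bc_datafile)
#   names = row_data.getDataObject(0)   # data object for the first column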
def _readOrificeRowData(self, unit_data, file_line):
    """Load the data defining the orifice openings in the bridge.

    Args:
        unit_data (list): the data pertaining to this unit.

    TODO:
        These errors are cryptic here as they're very specific to the
        RowDataCollections being accessed. Perhaps these should be made a
        little more relevant by raising a different error. Or they could be
        dealt with better here.
    """
    # Add the new row data types to the object collection
    # All of them must have type, output format, default value and position
    # in the row as the first variables in vars.
    # The others are DataType specific.
    self.additional_row_collections['Orifice'] = RowDataCollection()
    self.additional_row_collections['Orifice'].initCollection(
        do.FloatData(0, rdt.CULVERT_INVERT, format_str='{:>10}', no_of_dps=3))
    self.additional_row_collections['Orifice'].initCollection(
        do.FloatData(1, rdt.CULVERT_SOFFIT, format_str='{:>10}', no_of_dps=3))
    self.additional_row_collections['Orifice'].initCollection(
        do.FloatData(2, rdt.CULVERT_AREA, format_str='{:>10}', no_of_dps=3, default=0.0))
    self.additional_row_collections['Orifice'].initCollection(
        do.FloatData(3, rdt.CULVERT_CD_PART, format_str='{:>10}', no_of_dps=3, default=0.0))
    self.additional_row_collections['Orifice'].initCollection(
        do.FloatData(4, rdt.CULVERT_CD_FULL, format_str='{:>10}', no_of_dps=3, default=0.0))
    self.additional_row_collections['Orifice'].initCollection(
        do.FloatData(5, rdt.CULVERT_DROWNING, format_str='{:>10}', no_of_dps=3, default=0.0))

    out_line = file_line + self.no_of_culvert_rows
    try:
        # Load the geometry data
        for i in range(file_line, out_line):
            # Put the values into the respective data objects.
            # This is done based on the column widths set in the Dat file
            # for the orifice rows.
            self.additional_row_collections['Orifice'].addValue(
                rdt.CULVERT_INVERT, unit_data[i][0:10].strip())
            self.additional_row_collections['Orifice'].addValue(
                rdt.CULVERT_SOFFIT, unit_data[i][10:20].strip())
            self.additional_row_collections['Orifice'].addValue(
                rdt.CULVERT_AREA, unit_data[i][20:30].strip())
            self.additional_row_collections['Orifice'].addValue(
                rdt.CULVERT_CD_PART, unit_data[i][30:40].strip())
            self.additional_row_collections['Orifice'].addValue(
                rdt.CULVERT_CD_FULL, unit_data[i][40:50].strip())
            self.additional_row_collections['Orifice'].addValue(
                rdt.CULVERT_DROWNING, unit_data[i][50:60].strip())

    except NotImplementedError:
        logger.error('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')
        raise

    self.unit_length += self.no_of_culvert_rows
    return out_line
class BridgeUnit(AIsisUnit):
    """Subclass of AIsisUnit storing Isis Bridge Unit data.

    Note:
        This is really an abstract class for any bridge unit and is not
        intended to be used directly.

    Contains a reference to a rowdatacollection for storing and accessing all
    the row data. i.e. the geometry data for the section, containing the
    chainage, elevation, roughness, etc values.

    Methods for accessing the data in these objects and adding removing rows
    are available.
    """

    UNIT_TYPE = 'Bridge'
    CATEGORY = 'Bridge'
    UNIT_VARS = None

    def __init__(self, file_order):
        """Constructor.

        Args:
            file_order (int): The location of this unit in the file.
        """
        AIsisUnit.__init__(self, file_order)

        # Fill in the header values. These contain the data at the top of the
        # section, such as the unit name and labels.
        self.head_data = {
            'upstream': '',
            'downstream': '',
            'remote_us': '',
            'remote_ds': '',
            'roughness_type': 'MANNING',
            'calibration_coef': 1,
            'skew_angle': 1,
            'width': 0,
            'dual_distance': 0,
            'no_of_orifices': 0,
            'orifice_flag': '',
            'op_lower': 0,
            'op_upper': 0,
            'op_cd': 0,
            'comment': '',
            'rowcount': 0,
            'row_count_additional': {'Opening': 1}
        }

        self.unit_type = BridgeUnit.UNIT_TYPE
        self.unit_category = BridgeUnit.CATEGORY
        self.has_datarows = True
        self.no_of_collections = 2
        self.unit_length = 0
        self.no_of_chainage_rows = 1
        self.no_of_opening_rows = 1
        self.no_of_culvert_rows = 0
        self.additional_row_collections = OrderedDict()

    def getNumberOfOpenings(self):
        """
        """
        return self.no_of_opening_rows

    def getArea(self):
        """Returns the cross sectional area of the bridge openings.

        Return:
            Dict - containing the area of the opening(s). keys = 'total', then
            '1', '2', 'n' for all openings found.
        """
        return 0
#         areas = []
#         opening_data = self.additional_row_collections['Opening']
#         x_vals = self.row_collection.getRowDataAsList(rdt.CHAINAGE)
#         y_vals = self.row_collection.getRowDataAsList(rdt.ELEVATION)
#
#         start_vals = opening_data.getRowDataAsList(rdt.OPEN_START)
#         end_vals = opening_data.getRowDataAsList(rdt.OPEN_END)
#         soffit_vals = opening_data.getRowDataAsList(rdt.SOFFIT_LEVEL)
#         springing_vals = opening_data.getRowDataAsList(rdt.SPRINGING_LEVEL)
#         openings = zip(start_vals, end_vals, soffit_vals, springing_vals)
#
#         for i, x in enumerate(x_vals):
#
#             if math.fabs(x - )
#
#
#         i=0

    def readUnitData(self, unit_data, file_line):
        """Reads the unit data into the geometry objects.

        See Also:
            AIsisUnit

        Args:
            unit_data (list): The section of the isis dat file pertaining to
                this section.
        """
        file_line = self._readHeadData(unit_data, file_line)
        self.name = self.head_data['upstream']
        file_line = self._readMainRowData(unit_data, file_line)
        file_line = self._readAdditionalRowData(unit_data, file_line)
        self.head_data['rowcount'] = self.row_collection.getNumberOfRows()
        for key, data in self.additional_row_collections.iteritems():
            self.head_data['row_count_additional'][key] = data.getNumberOfRows()
        return file_line

    def _readHeadData(self, unit_data, file_line):
        """Reads the data in the file header section into the class.

        Note:
            Must be implemented by subclass.

        Raises:
            NotImplementedError: if not overridden by sub class.
        """
        raise NotImplementedError

    def _readMainRowData(self, unit_data, file_line):
        """Reads the units rows into the row collection.

        This is all the geometry data that occurs after the no of rows variable
        in the Bridge Units of the dat file.

        Args:
            unit_data (list): the data pertaining to this unit.
        """
        # Add the new row data types to the object collection
        # All of them must have type, output format, default value and position
        # in the row as the first variables in vars.
        # The others are DataType specific.
        self.row_collection = RowDataCollection()
        self.row_collection.initCollection(
            do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3))
        self.row_collection.initCollection(
            do.FloatData(1, rdt.ELEVATION, format_str='{:>10}', no_of_dps=3))
        self.row_collection.initCollection(
            do.FloatData(2, rdt.ROUGHNESS, format_str='{:>10}', no_of_dps=3, default=0.0))
        self.row_collection.initCollection(
            do.ConstantData(3, rdt.EMBANKMENT, ('L', 'R'), format_str='{:>11}', default=''))

        self.unit_length = 6
        out_line = file_line + self.no_of_chainage_rows
        try:
            # Load the geometry data
            for i in range(file_line, out_line):
                # Put the values into the respective data objects.
                # This is done based on the column widths set in the Dat file
                # for the bridge section.
                self.row_collection.addValue(rdt.CHAINAGE, unit_data[i][0:10].strip())
                self.row_collection.addValue(rdt.ELEVATION, unit_data[i][10:20].strip())
                self.row_collection.addValue(rdt.ROUGHNESS, unit_data[i][20:30].strip())

                # Might not exist
                try:
                    bank = unit_data[i][40:51].strip()
                except:
                    bank = ''
                self.row_collection.addValue(rdt.EMBANKMENT, bank)

        except NotImplementedError:
            logger.error('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')
            raise

        self.no_of_opening_rows = int(unit_data[out_line].strip())
        self.unit_length += self.no_of_chainage_rows + 1
        return out_line + 1

    def getData(self):
        """Retrieve the data in this unit.

        See Also:
            AIsisUnit - getData()

        Returns:
            String list - output data formatted the same as in the .DAT file.
        """
        out_data = self._getHeadData()
        out_data.extend(self._getRowData())
        out_data.extend(self._getAdditionalRowData())
        return out_data

    def _formatDataItem(self, item, col_width, no_of_dps=None,
                        is_head_item=True, align_right=True):
        """Format the given head data item for printing to file.
        """
        if is_head_item:
            item = self.head_data[item]
        if not no_of_dps == None:
            form = '%0.' + str(no_of_dps) + 'f'
            item = form % float(item)

        if align_right:
            final_str = '{:>' + str(col_width) + '}'
        else:
            final_str = '{:<' + str(col_width) + '}'
        return final_str.format(item)

    def _getRowData(self):
        """For all the rows in the river geometry section get the data from
        the rowdatacollection class.

        Returns:
            list - containing the formatted unit rows.
        """
        out_data = []
        no_of_rows = self.row_collection.getNumberOfRows()
        out_data.append(self._formatDataItem(no_of_rows, 10, is_head_item=False))
        for i in range(0, no_of_rows):
            out_data.append(self.row_collection.getPrintableRow(i))

        return out_data

    def _getAdditionalRowData(self):
        """Get the formatted row data for any additional row data objects.

        Returns:
            list - containing additional row data.
        """
        out_data = []
        for data in self.additional_row_collections.itervalues():
            no_of_rows = data.getNumberOfRows()
            out_data.append(self._formatDataItem(no_of_rows, 10, is_head_item=False))
            for i in range(0, no_of_rows):
                out_data.append(data.getPrintableRow(i))

        return out_data

    def _getHeadData(self):
        """Get the header data formatted for printing out.

        Note:
            Must be implemented by concrete subclass.

        Raises:
            NotImplementedError: if not overridden by sub class.
        """
        raise NotImplementedError

    def updateDataRow(self, row_vals, index, collection_name=None):
        """Updates the row at the given index in the units row_collection.

        The row will be updated at the given index.

        Args:
            row_vals(Dict): keys must be datunits.ROW_DATA_TYPES with a legal
                value assigned for the DataType. Chainage and Elevation MUST
                be included.
            index: the row to update.
            collection_name=None(str): If None the self.row_collection
                with the bridges geometry data will be updated. If a string it
                will be looked for in the self.additional_row_collections
                dictionary or raise an AttributeError if it doesn't exist.

        Raises:
            KeyError: If collection_name key does not exist.
            IndexError: If the index does not exist.
            ValueError: If the given value is not accepted by the DataObjects.

        See Also:
            ADataObject and subclasses for information on the parameters.
        """
        if not collection_name is None:
            if not collection_name in self.additional_row_collections.keys():
                raise KeyError('collection_name %s does not exist in row collection' % (collection_name))

        # Call superclass method to add the new row
        AIsisUnit.updateDataRow(self, index=index, row_vals=row_vals)

    def addDataRow(self, row_vals, index=None, collection_name=None):
        """Adds a new row to one of this bridge units row_collection's.

        The new row will be added at the given index. If no index is given it
        will be appended to the end of the collection.

        If no chainage or elevation values are given an AttributeError will be
        raised as they cannot have default values. All other values can be
        omitted. If they are they will be given defaults.

        Examples:
            >>> import ship.isis.datunits.rdt as rdt
            >>> unit.addDataRow({rdt.CHAINAGE:5.0, rdt.ELEVATION:36.2}, index=4)

        Args:
            row_vals(Dict): keys must be datunits.rdt with a legal value
                assigned for the DataType. Chainage and Elevation MUST be
                included.
            index=None(int): the row to insert into. The existing row at the
                given index will be moved up by one.
            collection_name=None(str): If None the self.row_collection
                with the bridges geometry data will be updated. If a string it
                will be looked for in the self.additional_row_collections
                dictionary or raise an AttributeError if it doesn't exist.

        Raises:
            AttributeError: If CHAINAGE or ELEVATION are not given.
            KeyError: if the collection_name does not exist.
            IndexError: If the index does not exist.
            ValueError: If the given value is not accepted by the DataObjects.

        See Also:
            ADataObject and subclasses for information on the parameters.
        """
        if not rdt.CHAINAGE in row_vals.keys() or not rdt.ELEVATION in row_vals.keys():
            logger.error('Required values of CHAINAGE and ELEVATION not given')
            raise AttributeError('Required values of CHAINAGE and ELEVATION not given')

        if not collection_name is None:
            if not collection_name in self.additional_row_collections.keys():
                raise KeyError('collection_name %s does not exist in row collection' % (collection_name))

        # Setup default values for arguments that aren't given
        kw = {}
        kw[rdt.CHAINAGE] = row_vals.get(rdt.CHAINAGE)
        kw[rdt.ELEVATION] = row_vals.get(rdt.ELEVATION)
        kw[rdt.ROUGHNESS] = row_vals.get(rdt.ROUGHNESS, 0.039)
        kw[rdt.PANEL_MARKER] = row_vals.get(rdt.PANEL_MARKER, False)
        kw[rdt.RPL] = row_vals.get(rdt.RPL, 1.0)
        kw[rdt.BANKMARKER] = row_vals.get(rdt.BANKMARKER, '')
        kw[rdt.EASTING] = row_vals.get(rdt.EASTING, 0.0)
        kw[rdt.NORTHING] = row_vals.get(rdt.NORTHING, 0.0)
        kw[rdt.DEACTIVATION] = row_vals.get(rdt.DEACTIVATION, '')
        kw[rdt.SPECIAL] = row_vals.get(rdt.SPECIAL, '')

        # Call superclass method to add the new row
        AIsisUnit.addDataRow(self, index=index, row_vals=kw,
                             collection_name=collection_name)

    def _checkChainageIncreaseNotNegative(self, index, chainageValue):
        """Checks that the new chainage value is not higher than the next one.

        If the given chainage value for the given index is higher than the
        value in the following row ISIS will give a negative chainage error.
        It will return True if the value is the last in the row.

        Args:
            index (int): The index that the value is to be added at.
            chainageValue (float): The chainage value to be added.

        Returns:
            False if greater or True if less.
        """
        if index == None:
            return True

        if not index == 0:
            if self.row_collection.getDataValue(rdt.CHAINAGE, index - 1) >= chainageValue:
                return False

        if self.row_collection.getDataValue(rdt.CHAINAGE, index) <= chainageValue:
            return False

        return True
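# Usage sketch (hedged, mirroring the docstring example): only CHAINAGE and
# ELEVATION are mandatory; the rest fall back to the defaults set up in
# addDataRow() above. `bridge` stands in for a concrete BridgeUnit subclass
# instance with its collections already loaded.
#
#   bridge.addDataRow({rdt.CHAINAGE: 5.0, rdt.ELEVATION: 36.2}, index=4)
#   bridge.addDataRow({rdt.ELEVATION: 36.2}, index=4)   # raises AttributeError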
def _readMainRowData(self, unit_data, file_line):
    """Reads the units rows into the row collection.

    This is all the geometry data that occurs after the no of rows variable
    in the Bridge Units of the dat file.

    Args:
        unit_data (list): the data pertaining to this unit.
    """
    # Add the new row data types to the object collection
    # All of them must have type, output format, default value and position
    # in the row as the first variables in vars.
    # The others are DataType specific.
    self.row_collection = RowDataCollection()
    self.row_collection.initCollection(
        do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3))
    self.row_collection.initCollection(
        do.FloatData(1, rdt.ELEVATION, format_str='{:>10}', no_of_dps=3))
    self.row_collection.initCollection(
        do.FloatData(2, rdt.ROUGHNESS, format_str='{:>10}', no_of_dps=3, default=0.0))
    self.row_collection.initCollection(
        do.ConstantData(3, rdt.EMBANKMENT, ('L', 'R'), format_str='{:>11}', default=''))

    self.unit_length = 6
    out_line = file_line + self.no_of_chainage_rows
    try:
        # Load the geometry data
        for i in range(file_line, out_line):
            # Put the values into the respective data objects.
            # This is done based on the column widths set in the Dat file
            # for the river section.
            self.row_collection.addValue(rdt.CHAINAGE, unit_data[i][0:10].strip())
            self.row_collection.addValue(rdt.ELEVATION, unit_data[i][10:20].strip())
            self.row_collection.addValue(rdt.ROUGHNESS, unit_data[i][20:30].strip())

            # Might not exist
            try:
                bank = unit_data[i][40:51].strip()
            except:
                bank = ''
            self.row_collection.addValue(rdt.EMBANKMENT, bank)

    except NotImplementedError:
        logger.error('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')
        raise

    self.no_of_opening_rows = int(unit_data[out_line].strip())
    self.unit_length += self.no_of_chainage_rows + 1
    return out_line + 1
class RiverUnit(AIsisUnit):
    """Concrete implementation of AIsisUnit storing Isis River Unit data.

    Contains a reference to a rowdatacollection for storing and accessing all
    the row data. i.e. the geometry data for the section, containing the
    chainage, elevation, roughness, etc values.

    Methods for accessing the data in these objects and adding removing rows
    are available.

    See Also:
        AIsisUnit
    """

    UNIT_TYPE = 'River'
    CATEGORY = 'River'
    FILE_KEY = 'RIVER'

    def __init__(self, file_order, reach_number):
        """Constructor.

        Args:
            file_order (int): The location of this unit in the file.
            reach_number (int): The reach ID for this unit.
        """
        AIsisUnit.__init__(self, file_order)

        # Fill in the header values. These contain the data at the top of the
        # section, such as the unit name and labels.
        self.head_data = {
            'section_label': '',
            'spill1': '',
            'spill2': '',
            'lateral1': '',
            'lateral2': '',
            'lateral3': '',
            'lateral4': '',
            'distance': 0,
            'slope': '',
            'density': 1000,
            'comment': '',
            'rowcount': 0
        }

        self.unit_type = RiverUnit.UNIT_TYPE
        self.unit_category = RiverUnit.CATEGORY
        self.has_datarows = True
        self.reach_number = reach_number
        self.unit_length = 0

    def readUnitData(self, unit_data, file_line):
        """Reads the unit data into the geometry objects.

        See Also:
            AIsisUnit - readUnitData for more information.

        Args:
            unit_data (list): The section of the isis dat file pertaining to
                this section
        """
        file_line = self._readHeadData(unit_data, file_line)
        file_line = self._readRowData(unit_data, file_line)
        self.head_data['rowcount'] = self.row_collection.getNumberOfRows()
        return file_line - 1

    def _readHeadData(self, unit_data, file_line):
        """Reads the data in the file header section into the class.

        Args:
            unit_data (list): containing the data to read.
        """
        self.head_data['comment'] = unit_data[file_line + 0][5:].strip()
        self.name = self.head_data['section_label'] = unit_data[file_line + 2][:12].strip()
        self.head_data['spill1'] = unit_data[file_line + 2][12:24].strip()
        self.head_data['spill2'] = unit_data[file_line + 2][24:36].strip()
        self.head_data['lateral1'] = unit_data[file_line + 2][36:48].strip()
        self.head_data['lateral2'] = unit_data[file_line + 2][48:60].strip()
        self.head_data['lateral3'] = unit_data[file_line + 2][60:72].strip()
        self.head_data['lateral4'] = unit_data[file_line + 2][72:84].strip()
        self.head_data['distance'] = unit_data[file_line + 3][0:10].strip()
        self.head_data['slope'] = unit_data[file_line + 3][10:30].strip()
        self.head_data['density'] = unit_data[file_line + 3][30:40].strip()
        self.unit_length = int(unit_data[file_line + 4].strip())
        return file_line + 5

    def _readRowData(self, unit_data, file_line):
        """Reads the units rows into the row collection.

        This is all the geometry data that occurs after the no of rows variable
        in the River Units of the dat file.

        Args:
            unit_data (list): the data pertaining to this unit.
        """
        # Add the new row data types to the object collection
        # All of them must have type, output format, default value and position
        # in the row as the first variables in vars.
        # The others are DataType specific.
        self.row_collection = RowDataCollection()
        self.row_collection.initCollection(
            do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3))
        self.row_collection.initCollection(
            do.FloatData(1, rdt.ELEVATION, format_str='{:>10}', no_of_dps=3))
        self.row_collection.initCollection(
            do.FloatData(2, rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3))
        self.row_collection.initCollection(
            do.SymbolData(3, rdt.PANEL_MARKER, '*', format_str='{:<5}', default=False))
        self.row_collection.initCollection(
            do.FloatData(4, rdt.RPL, format_str='{:>5}', default=1.000, no_of_dps=3))
        self.row_collection.initCollection(
            do.ConstantData(5, rdt.BANKMARKER, ('LEFT', 'RIGHT', 'BED'), format_str='{:<10}', default=''))
        self.row_collection.initCollection(
            do.FloatData(6, rdt.EASTING, format_str='{:>10}', default=0.0, no_of_dps=2))
        self.row_collection.initCollection(
            do.FloatData(7, rdt.NORTHING, format_str='{:>10}', default=0.0, no_of_dps=2))
        self.row_collection.initCollection(
            do.ConstantData(8, rdt.DEACTIVATION, ('LEFT', 'RIGHT'), format_str='{:<10}', default=''))
        # Default == '~' means to ignore formatting and apply '' when value is None
        self.row_collection.initCollection(
            do.StringData(9, rdt.SPECIAL, format_str='{:<10}', default='~'))

        out_line = file_line + self.unit_length
        try:
            # Load the geometry data
            for i in range(file_line, out_line):
                # Put the values into the respective data objects.
                # This is done based on the column widths set in the Dat file
                # for the river section.
                self.row_collection.addValue(rdt.CHAINAGE, unit_data[i][0:10].strip())
                self.row_collection.addValue(rdt.ELEVATION, unit_data[i][10:20].strip())
                self.row_collection.addValue(rdt.ROUGHNESS, unit_data[i][20:30].strip())
                self.row_collection.addValue(rdt.PANEL_MARKER, unit_data[i][30:35].strip())
                self.row_collection.addValue(rdt.RPL, unit_data[i][35:40].strip())
                self.row_collection.addValue(rdt.BANKMARKER, unit_data[i][40:50].strip())
                self.row_collection.addValue(rdt.EASTING, unit_data[i][50:60].strip())
                self.row_collection.addValue(rdt.NORTHING, unit_data[i][60:70].strip())
                self.row_collection.addValue(rdt.DEACTIVATION, unit_data[i][70:80].strip())
                self.row_collection.addValue(rdt.SPECIAL, unit_data[i][80:90].strip())

        except NotImplementedError:
            logger.error('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')
            raise

        return out_line

    def getData(self):
        """Retrieve the data in this unit.

        The String[] returned is formatted for printing in the fashion of the
        .dat file.

        Return:
            List of strings formatted for writing to .dat file.
        """
        out_data = self._getHeadData()
        out_data.extend(self._getRowData())

        return out_data

    def _getRowData(self):
        """Returns the row data in this class.

        For all the rows in the river geometry section get the data from the
        rowdatacollection class.

        Returns:
            list - containing the formatted unit rows.
        """
        out_data = []
        for i in range(0, self.row_collection.getNumberOfRows()):
            out_data.append(self.row_collection.getPrintableRow(i))

        return out_data

    def _getHeadData(self):
        """Get the header data formatted for printing out to file.

        Returns:
            List of strings - The formatted header list.
        """
        out_data = []
        self.head_data['rowcount'] = self.unit_length
        out_data.append('RIVER ' + self.head_data['comment'])
        out_data.append('SECTION')

        # Get the row with the section name and spill info from the formatter
        out_data.append(self._getHeadSectionRowFormat())

        out_data.append('{:>10}'.format(self.head_data['distance']) +
                        '{:>20}'.format(self.head_data['slope']) +
                        '{:>10}'.format(self.head_data['density']))
        out_data.append('{:>10}'.format(self.head_data['rowcount']))

        return out_data

    def _getHeadSectionRowFormat(self):
        """Formats the section name and spill file row according to contents.

        This is quite a pedantic method. Essentially if there are spills in
        the line of the file they each get 12 spaces. However if there's just
        the one spill the whitespace is cut off the end. Isis is pretty weird
        about white space so it's best to get it right.

        Returns:
            string containing row data with whitespace trimmed from the right
            side.
        """
        section_row = '{:<12}'.format(self.head_data['section_label'])
        if not self.head_data['spill1'] == '':
            section_row += '{:<12}'.format(self.head_data['spill1'])
        if not self.head_data['spill2'] == '':
            section_row += '{:<12}'.format(self.head_data['spill2'])

        section_row = section_row.rstrip()
        return section_row

    def updateDataRow(self, row_vals, index):
        """Updates the row at the given index in the river units row_collection.

        The row will be updated at the given index.

        Args:
            row_vals(Dict): keys must be datunits.ROW_DATA_TYPES with a legal
                value assigned for the DataType. Chainage and Elevation MUST
                be included.
            index: the row to update.

        Raises:
            AttributeError: If CHAINAGE or ELEVATION are not given.
            IndexError: If the index does not exist.
            ValueError: If the given value is not accepted by the DataObjects.

        See Also:
            ADataObject and subclasses for information on the parameters.
        """
        # Call superclass method to add the new row
        AIsisUnit.updateDataRow(self, index=index, row_vals=row_vals)

    def addDataRow(self, row_vals, index=None):
        """Adds a new row to the river units row_collection.

        The new row will be added at the given index. If no index is given it
        will be appended to the end of the collection.

        If no chainage or elevation values are given an AttributeError will be
        raised as they cannot have default values. All other values can be
        omitted. If they are they will be given defaults.

        Examples:
            >>> import ship.isis.datunits.ROW_DATA_TYPES as rdt
            >>> river_unit.addDataRow({rdt.CHAINAGE:5.0, rdt.ELEVATION:36.2}, index=4)

        Args:
            row_vals(Dict): keys must be datunits.ROW_DATA_TYPES with a legal
                value assigned for the DataType. Chainage and Elevation MUST
                be included.
            index=None(int): the row to insert into. The existing row at the
                given index will be moved up by one.

        Raises:
            AttributeError: If CHAINAGE or ELEVATION are not given.
            IndexError: If the index does not exist.
            ValueError: If the given value is not accepted by the DataObjects.

        See Also:
            ADataObject and subclasses for information on the parameters.
        """
        if not rdt.CHAINAGE in row_vals.keys() or not rdt.ELEVATION in row_vals.keys():
            logger.error('Required values of CHAINAGE and ELEVATION not given')
            raise AttributeError('Required values of CHAINAGE and ELEVATION not given')

        # Setup default values for arguments that aren't given
        kw = {}
        kw[rdt.CHAINAGE] = row_vals.get(rdt.CHAINAGE)
        kw[rdt.ELEVATION] = row_vals.get(rdt.ELEVATION)
        kw[rdt.ROUGHNESS] = row_vals.get(rdt.ROUGHNESS, 0.039)
        kw[rdt.PANEL_MARKER] = row_vals.get(rdt.PANEL_MARKER, False)
        kw[rdt.RPL] = row_vals.get(rdt.RPL, 1.0)
        kw[rdt.BANKMARKER] = row_vals.get(rdt.BANKMARKER, '')
        kw[rdt.EASTING] = row_vals.get(rdt.EASTING, 0.0)
        kw[rdt.NORTHING] = row_vals.get(rdt.NORTHING, 0.0)
        kw[rdt.DEACTIVATION] = row_vals.get(rdt.DEACTIVATION, '')
        kw[rdt.SPECIAL] = row_vals.get(rdt.SPECIAL, '')

        # Call superclass method to add the new row
        AIsisUnit.addDataRow(self, index=index, row_vals=kw)
def _readRowData(self, unit_data, file_line):
    """Reads the units rows into the row collection.

    This is all the geometry data that occurs after the no of rows variable
    in the River Units of the dat file.

    Args:
        unit_data (list): the data pertaining to this unit.
    """
    # Add the new row data types to the object collection
    # All of them must have type, output format, default value and position
    # in the row as the first variables in vars.
    # The others are DataType specific.
    self.row_collection = RowDataCollection()
    self.row_collection.initCollection(
        do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3))
    self.row_collection.initCollection(
        do.FloatData(1, rdt.ELEVATION, format_str='{:>10}', no_of_dps=3))
    self.row_collection.initCollection(
        do.FloatData(2, rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3))
    self.row_collection.initCollection(
        do.SymbolData(3, rdt.PANEL_MARKER, '*', format_str='{:<5}', default=False))
    self.row_collection.initCollection(
        do.FloatData(4, rdt.RPL, format_str='{:>5}', default=1.000, no_of_dps=3))
    self.row_collection.initCollection(
        do.ConstantData(5, rdt.BANKMARKER, ('LEFT', 'RIGHT', 'BED'), format_str='{:<10}', default=''))
    self.row_collection.initCollection(
        do.FloatData(6, rdt.EASTING, format_str='{:>10}', default=0.0, no_of_dps=2))
    self.row_collection.initCollection(
        do.FloatData(7, rdt.NORTHING, format_str='{:>10}', default=0.0, no_of_dps=2))
    self.row_collection.initCollection(
        do.ConstantData(8, rdt.DEACTIVATION, ('LEFT', 'RIGHT'), format_str='{:<10}', default=''))
    # Default == '~' means to ignore formatting and apply '' when value is None
    self.row_collection.initCollection(
        do.StringData(9, rdt.SPECIAL, format_str='{:<10}', default='~'))

    out_line = file_line + self.unit_length
    try:
        # Load the geometry data
        for i in range(file_line, out_line):
            # Put the values into the respective data objects.
            # This is done based on the column widths set in the Dat file
            # for the river section.
            self.row_collection.addValue(rdt.CHAINAGE, unit_data[i][0:10].strip())
            self.row_collection.addValue(rdt.ELEVATION, unit_data[i][10:20].strip())
            self.row_collection.addValue(rdt.ROUGHNESS, unit_data[i][20:30].strip())
            self.row_collection.addValue(rdt.PANEL_MARKER, unit_data[i][30:35].strip())
            self.row_collection.addValue(rdt.RPL, unit_data[i][35:40].strip())
            self.row_collection.addValue(rdt.BANKMARKER, unit_data[i][40:50].strip())
            self.row_collection.addValue(rdt.EASTING, unit_data[i][50:60].strip())
            self.row_collection.addValue(rdt.NORTHING, unit_data[i][60:70].strip())
            self.row_collection.addValue(rdt.DEACTIVATION, unit_data[i][70:80].strip())
            self.row_collection.addValue(rdt.SPECIAL, unit_data[i][80:90].strip())

    except NotImplementedError:
        logger.error('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')
        raise

    return out_line
def test_addDataRow_method(self):
    '''Test adding a new row to the river section.'''
    river = riverunit.RiverUnit(1, 1)
    river.head_data = self.header_vars
    river.unit_length = 18

    # Create some data objects
    objs = []
    objs.append(do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3))
    objs.append(do.FloatData(1, rdt.ELEVATION, format_str='{:>10}', no_of_dps=3))
    objs.append(do.FloatData(2, rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3))
    objs.append(do.SymbolData(3, rdt.PANEL_MARKER, '*', format_str='{:<5}', default=False))
    objs.append(do.FloatData(4, rdt.RPL, format_str='{:>5}', default=1.000, no_of_dps=3))
    objs.append(do.ConstantData(5, rdt.BANKMARKER, ('LEFT', 'RIGHT', 'BED'), format_str='{:<10}', default=''))
    objs.append(do.FloatData(6, rdt.EASTING, format_str='{:>10}', default=0.0, no_of_dps=2))
    objs.append(do.FloatData(7, rdt.NORTHING, format_str='{:>10}', default=0.0, no_of_dps=2))
    objs.append(do.ConstantData(8, rdt.DEACTIVATION, ('LEFT', 'RIGHT'), format_str='{:<10}', default=''))
    objs.append(do.StringData(9, rdt.SPECIAL, format_str='{:<10}', default='~'))

    # Populate the data
    objs[0].data_collection = [5.996, 6.936, 7.446, 7.635, 8.561, 9.551,
                               10.323, 10.904, 12.542, 13.74, 13.788, 13.944,
                               15.008, 16.355, 17.424, 18.449, 19.416, 19.420]
    objs[1].data_collection = [37.56, 37.197, 36.726, 35.235, 35.196, 35.19,
                               35.229, 35.319, 35.637, 35.593, 35.592, 36.148,
                               36.559, 37.542, 38.518, 39.037, 39.146, 39.133]
    objs[2].data_collection = [0.08, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035,
                               0.035, 0.035, 0.035, 0.035, 0.035, 0.08, 0.08,
                               0.08, 0.08, 0.08, 0.08]
    objs[3].data_collection = [False, True, False, False, False, False, False,
                               False, False, False, False, False, True, False,
                               False, False, False, False]
    objs[4].data_collection = [1.0] * 18
    objs[5].data_collection = ['LEFT', False, False, False, False, 'BED',
                               False, False, False, False, False, False,
                               False, False, False, False, False, 'RIGHT']
    objs[6].data_collection = [291391.67, 291391.43, 291391.3, 291391.25,
                               291391.01, 291390.75, 291390.55, 291390.4,
                               291389.98, 291389.67, 291389.66, 291389.62,
                               291389.34, 291389.0, 291388.72, 291388.46,
                               291388.21, 291388.21]
    objs[7].data_collection = [86582.61, 86581.7, 86581.21, 86581.03,
                               86580.13, 86579.18, 86578.43, 86577.87,
                               86576.29, 86575.13, 86575.09, 86574.93,
                               86573.91, 86572.6, 86571.57, 86570.58,
                               86569.65, 86569.65]
    objs[8].data_collection = ['LEFT', False, False, False, False, False,
                               False, False, False, False, False, False,
                               False, False, False, False, False, 'RIGHT']
    objs[9].data_collection = ['16', '', '', '', '', '', '', '', '', '', '',
                               '', '', '', '', '', '', '4095']

    # Add the data objects to the row data collection
    col = RowDataCollection()
    for o in objs:
        o.record_length = 18
        col._collection.append(o)
    river.row_collection = col

    # Add a new row
    river.addDataRow(row_vals={rdt.CHAINAGE: 9.42, rdt.ELEVATION: 35.2,
                               rdt.ROUGHNESS: 0.035, rdt.SPECIAL: '1264'},
                     index=5)

    # Make sure that we get back the same values as we set.
    self.assertEqual(9.42, river.row_collection._collection[rdt.CHAINAGE].data_collection[5],
                     'Add new row - get chainage value failed')
    self.assertEqual(35.2, river.row_collection._collection[rdt.ELEVATION].data_collection[5],
                     'Add new row - get elevation value failed')
    self.assertEqual(0.035, river.row_collection._collection[rdt.ROUGHNESS].data_collection[5],
                     'Add new row - get roughness value failed')
    self.assertEqual(False, river.row_collection._collection[rdt.PANEL_MARKER].data_collection[5],
                     'Add new row - get panelmarker value failed')
    self.assertEqual(1.000, river.row_collection._collection[rdt.RPL].data_collection[5],
                     'Add new row - get rpl value failed')
    self.assertEqual(False, river.row_collection._collection[rdt.BANKMARKER].data_collection[5],
                     'Add new row - get bankmarker value failed')
    self.assertEqual(0.00, river.row_collection._collection[rdt.EASTING].data_collection[5],
                     'Add new row - get easting value failed')
    self.assertEqual(0.00, river.row_collection._collection[rdt.NORTHING].data_collection[5],
                     'Add new row - get northing value failed')
    self.assertEqual(False, river.row_collection._collection[rdt.DEACTIVATION].data_collection[5],
                     'Add new row - get deactivation value failed')
    self.assertEqual('1264', river.row_collection._collection[rdt.SPECIAL].data_collection[5],
                     'Add new row - get special value failed')

    # This is how we expect the data to look after the addDataRow() call
    new_chainage = [5.996, 6.936, 7.446, 7.635, 8.561, 9.42, 9.551, 10.323,
                    10.904, 12.542, 13.74, 13.788, 13.944, 15.008, 16.355,
                    17.424, 18.449, 19.416, 19.42]
    new_elevation = [37.56, 37.197, 36.726, 35.235, 35.196, 35.2, 35.19,
                     35.229, 35.319, 35.637, 35.593, 35.592, 36.148, 36.559,
                     37.542, 38.518, 39.037, 39.146, 39.133]
    new_roughness = [0.08, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035,
                     0.035, 0.035, 0.035, 0.035, 0.035, 0.08, 0.08, 0.08,
                     0.08, 0.08, 0.08]
    new_panelmarker = [False, True, False, False, False, False, False, False,
                       False, False, False, False, False, True, False, False,
                       False, False, False]
    new_rpl = [1.0] * 19
    new_bankmarker = ['LEFT', False, False, False, False, False, 'BED', False,
                      False, False, False, False, False, False, False, False,
                      False, False, 'RIGHT']
    new_easting = [291391.67, 291391.43, 291391.3, 291391.25, 291391.01, 0.00,
                   291390.75, 291390.55, 291390.4, 291389.98, 291389.67,
                   291389.66, 291389.62, 291389.34, 291389.0, 291388.72,
                   291388.46, 291388.21, 291388.21]
    new_northing = [86582.61, 86581.7, 86581.21, 86581.03, 86580.13, 0.00,
                    86579.18, 86578.43, 86577.87, 86576.29, 86575.13,
                    86575.09, 86574.93, 86573.91, 86572.6, 86571.57,
                    86570.58, 86569.65, 86569.65]
    new_deactivation = ['LEFT', False, False, False, False, False, False,
                        False, False, False, False, False, False, False,
                        False, False, False, False, 'RIGHT']
    new_special = ['16', '', '', '', '', '1264', '', '', '', '', '', '', '',
                   '', '', '', '', '', '4095']

    # Check that the full data objects were updated as expected.
    self.assertListEqual(new_chainage, river.row_collection._collection[0].data_collection,
                         'Chainage list comparison after insertion fail')
    self.assertListEqual(new_elevation, river.row_collection._collection[1].data_collection,
                         'Elevation list comparison after insertion fail')
    self.assertListEqual(new_roughness, river.row_collection._collection[2].data_collection,
                         'Roughness list comparison after insertion fail')
    self.assertListEqual(new_panelmarker, river.row_collection._collection[3].data_collection,
                         'Panelmarker list comparison after insertion fail')
    self.assertListEqual(new_rpl, river.row_collection._collection[4].data_collection,
                         'Rpl list comparison after insertion fail')
    self.assertListEqual(new_bankmarker, river.row_collection._collection[5].data_collection,
                         'Bankmarker list comparison after insertion fail')
    self.assertListEqual(new_easting, river.row_collection._collection[6].data_collection,
                         'Easting list comparison after insertion fail')
    self.assertListEqual(new_northing, river.row_collection._collection[7].data_collection,
                         'Northing list comparison after insertion fail')
    self.assertListEqual(new_deactivation, river.row_collection._collection[8].data_collection,
                         'Deactivation list comparison after insertion fail')
    self.assertListEqual(new_special, river.row_collection._collection[9].data_collection,
                         'Special list comparison after insertion fail')

    # Check that it recognises illegal input values
    self.assertRaises(AttributeError,
                      lambda: river.addDataRow({'trick': 39.1}))

    # Check that it recognises when an insert would break the ascending
    # chainage order of the section
    self.assertRaises(ValueError,
                      lambda: river.addDataRow({rdt.CHAINAGE: 10.42,
                                                rdt.ELEVATION: 35.3}, 5))
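# --- Illustrative sketch (not part of the test) ---
# The ValueError case above relies on chainage increasing monotonically
# along the section. One plausible form of that validation, assuming the
# unit compares the new chainage against its neighbours at the insertion
# index; the function name is hypothetical.
def _example_check_chainage(chainages, new_chainage, index):
    """Raise if inserting new_chainage at index would break ascending order."""
    if index > 0 and chainages[index - 1] >= new_chainage:
        raise ValueError('Chainage must be greater than the previous row')
    if index < len(chainages) and chainages[index] <= new_chainage:
        raise ValueError('Chainage must be less than the following row')

# e.g. _example_check_chainage([8.561, 9.551], 10.42, 1) raises ValueError,
# mirroring the assertRaises case above: 10.42 cannot sit before 9.551.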
def test_getData_method(self):
    '''Test the output of the getData() method.'''
    river = riverunit.RiverUnit(1, 1)
    river.head_data = self.header_vars
    river.unit_length = 18

    # Create some data objects
    objs = []
    objs.append(do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3))
    objs.append(do.FloatData(1, rdt.ELEVATION, format_str='{:>10}', no_of_dps=3))
    objs.append(do.FloatData(2, rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3))
    objs.append(do.SymbolData(3, rdt.PANEL_MARKER, '*', format_str='{:<5}', default=False))
    objs.append(do.FloatData(4, rdt.RPL, format_str='{:>5}', default=1.000, no_of_dps=3))
    objs.append(do.ConstantData(5, rdt.BANKMARKER, ('LEFT', 'RIGHT', 'BED'), format_str='{:<10}', default=''))
    objs.append(do.FloatData(6, rdt.EASTING, format_str='{:>10}', default=0.0, no_of_dps=2))
    objs.append(do.FloatData(7, rdt.NORTHING, format_str='{:>10}', default=0.0, no_of_dps=2))
    objs.append(do.ConstantData(8, rdt.DEACTIVATION, ('LEFT', 'RIGHT'), format_str='{:<10}', default=''))
    objs.append(do.StringData(9, rdt.SPECIAL, format_str='{:<10}', default='~'))

    # Populate the data
    objs[0].data_collection = [5.996, 6.936, 7.446, 7.635, 8.561, 9.551,
                               10.323, 10.904, 12.542, 13.74, 13.788, 13.944,
                               15.008, 16.355, 17.424, 18.449, 19.416, 19.420]
    objs[1].data_collection = [37.56, 37.197, 36.726, 35.235, 35.196, 35.19,
                               35.229, 35.319, 35.637, 35.593, 35.592, 36.148,
                               36.559, 37.542, 38.518, 39.037, 39.146, 39.133]
    objs[2].data_collection = [0.08, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035,
                               0.035, 0.035, 0.035, 0.035, 0.035, 0.08, 0.08,
                               0.08, 0.08, 0.08, 0.08]
    objs[3].data_collection = [False, True, False, False, False, False, False,
                               False, False, False, False, False, True, False,
                               False, False, False, False]
    objs[4].data_collection = [1.0] * 18
    objs[5].data_collection = ['LEFT', False, False, False, False, 'BED',
                               False, False, False, False, False, False,
                               False, False, False, False, False, 'RIGHT']
    objs[6].data_collection = [291391.67, 291391.43, 291391.3, 291391.25,
                               291391.01, 291390.75, 291390.55, 291390.4,
                               291389.98, 291389.67, 291389.66, 291389.62,
                               291389.34, 291389.0, 291388.72, 291388.46,
                               291388.21, 291388.21]
    objs[7].data_collection = [86582.61, 86581.7, 86581.21, 86581.03,
                               86580.13, 86579.18, 86578.43, 86577.87,
                               86576.29, 86575.13, 86575.09, 86574.93,
                               86573.91, 86572.6, 86571.57, 86570.58,
                               86569.65, 86569.65]
    objs[8].data_collection = ['LEFT', False, False, False, False, False,
                               False, False, False, False, False, False,
                               False, False, False, False, False, 'RIGHT']
    objs[9].data_collection = ['16', '', '', '', '', '', '', '', '', '', '',
                               '', '', '', '', '', '', '4095']

    # Add the data objects to the row data collection
    river.row_collection = RowDataCollection()
    for o in objs:
        o.record_length = 18
        river.row_collection._collection.append(o)

    # Set up the list that we expect getData() to return
    out_data = \
        ['RIVER (Culvert Exit) CH:7932 - Trimmed to BT',
         'SECTION',
         '1.069',
         ' 15.078 1.111111 1000',
         ' 18',
         '     5.996    37.560     0.080     1.000LEFT       291391.67  86582.61LEFT      16        ',
         '     6.936    37.197     0.035*    1.000           291391.43  86581.70                    ',
         '     7.446    36.726     0.035     1.000           291391.30  86581.21                    ',
         '     7.635    35.235     0.035     1.000           291391.25  86581.03                    ',
         '     8.561    35.196     0.035     1.000           291391.01  86580.13                    ',
         '     9.551    35.190     0.035     1.000BED        291390.75  86579.18                    ',
         '    10.323    35.229     0.035     1.000           291390.55  86578.43                    ',
         '    10.904    35.319     0.035     1.000           291390.40  86577.87                    ',
         '    12.542    35.637     0.035     1.000           291389.98  86576.29                    ',
         '    13.740    35.593     0.035     1.000           291389.67  86575.13                    ',
         '    13.788    35.592     0.035     1.000           291389.66  86575.09                    ',
         '    13.944    36.148     0.035     1.000           291389.62  86574.93                    ',
         '    15.008    36.559     0.080*    1.000           291389.34  86573.91                    ',
         '    16.355    37.542     0.080     1.000           291389.00  86572.60                    ',
         '    17.424    38.518     0.080     1.000           291388.72  86571.57                    ',
         '    18.449    39.037     0.080     1.000           291388.46  86570.58                    ',
         '    19.416    39.146     0.080     1.000           291388.21  86569.65                    ',
         '    19.420    39.133     0.080     1.000RIGHT      291388.21  86569.65RIGHT     4095      ']

    # Get the data and check it against our template
    data = river.getData()
    self.assertEqual(out_data, data, 'getData() formatting failed')
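# --- Illustrative sketch (not part of the test) ---
# A rough idea of how the fixed-width rows checked above can be composed,
# assuming str.format() with the widths and decimal places given to the data
# objects in _readRowData. This is not the library's actual getData()
# implementation; the function name is hypothetical.
def _example_format_row(chainage, elevation, roughness, panel, rpl,
                        bankmarker, easting, northing, deactivation, special):
    """Render one RIVER geometry row as a 90-character fixed-width string."""
    return ('{:>10.3f}{:>10.3f}{:>10.3f}{:<5}{:>5.3f}{:<10}'
            '{:>10.2f}{:>10.2f}{:<10}{:<10}').format(
                chainage, elevation, roughness, '*' if panel else '', rpl,
                bankmarker or '', easting, northing, deactivation or '',
                special or '')

# e.g. _example_format_row(5.996, 37.56, 0.08, False, 1.0, 'LEFT',
#                          291391.67, 86582.61, 'LEFT', '16')
# reproduces the first geometry row in out_data above.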