def setupRowCollection():
    """Create and configure the RowDataCollection for loading the data into.

    Return:
        RowDataCollection: with one leading string column (no comma prefix),
        ten further string/float columns, and an extra 'row_no' column for
        tracking the data in the file.
    """
    collection = RowDataCollection()

    # Column 0 is formatted without the leading comma that every other
    # column carries, so it is initialised on its own.
    collection.initCollection(
        do.StringData(0, 0, format_str='{0}', default=''))

    # Flags for columns 1-11: 0 == string column, 1 == float column.
    is_float = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1]
    for col, flag in enumerate(is_float, 1):
        if flag:
            collection.initCollection(
                do.FloatData(col, col, format_str=', {0}', no_of_dps=3,
                             default=0.00))
        else:
            collection.initCollection(
                do.StringData(col, col, format_str=', {0}', default=''))

    # Extra entry used to track the row numbers found in the file.
    collection.initCollection(do.IntData(15, 'row_no'))

    return collection
def _readArchRowData(self, unit_data, file_line):
    """Load the data defining the openings in the bridge.

    Args:
        unit_data (list): the data pertaining to this unit.

    TODO:
        Change the name of this function to _readOpeningRowData.
    """
    # Build the row data objects for the opening geometry. Every data
    # object must have type, output format, default value and position in
    # the row as the first variables in vars; the rest are DataType
    # specific.
    openings = RowDataCollection()
    openings.initCollection(
        do.FloatData(0, rdt.OPEN_START, format_str='{:>10}', no_of_dps=3))
    openings.initCollection(
        do.FloatData(1, rdt.OPEN_END, format_str='{:>10}', no_of_dps=3))
    openings.initCollection(
        do.FloatData(2, rdt.SPRINGING_LEVEL, format_str='{:>10}',
                     no_of_dps=3, default=0.0))
    openings.initCollection(
        do.FloatData(3, rdt.SOFFIT_LEVEL, format_str='{:>10}',
                     no_of_dps=3, default=0.0))
    self.additional_row_collections['Opening'] = openings

    out_line = file_line + self.no_of_opening_rows
    try:
        # Pull each value out of the fixed-width columns defined by the
        # Dat file format for this section.
        for line_no in range(file_line, out_line):
            data_row = unit_data[line_no]
            openings.addValue(rdt.OPEN_START, data_row[0:10].strip())
            openings.addValue(rdt.OPEN_END, data_row[10:20].strip())
            openings.addValue(rdt.SPRINGING_LEVEL, data_row[20:30].strip())
            openings.addValue(rdt.SOFFIT_LEVEL, data_row[30:40].strip())
    except NotImplementedError:
        logger.error(
            'Unable to read Unit Data(dataRowObject creation) - NotImplementedError'
        )
        raise

    # The line after the openings holds the culvert row count.
    self.no_of_culvert_rows = int(unit_data[out_line].strip())
    self.unit_length += self.no_of_culvert_rows + 1
    return out_line
def _readRowData(self, unit_data, file_line):
    """Reads the units rows into the row collection.

    This is all the geometry data that occurs after the no of rows variable
    in the Spill Units of the dat file.

    Args:
        unit_data (list): the data pertaining to this unit.
        file_line (int): index of the first geometry row in unit_data.

    Return:
        int: index of the line directly after the geometry rows.
    """
    # Add the new row data types to the object collection.
    # All of them must have type, output format, default value and position
    # in the row as the first variables in vars.
    # The others are DataType specific.
    self.row_collection = RowDataCollection()
    self.row_collection.initCollection(do.FloatData(0, rdt.CHAINAGE,
                                                    format_str='{:>10}', no_of_dps=3))
    self.row_collection.initCollection(do.FloatData(1, rdt.ELEVATION,
                                                    format_str='{:>10}', no_of_dps=3))
    self.row_collection.initCollection(do.FloatData(2, rdt.EASTING,
                                                    format_str='{:>10}', no_of_dps=2, default=0.0))
    self.row_collection.initCollection(do.FloatData(3, rdt.NORTHING,
                                                    format_str='{:>10}', no_of_dps=2, default=0.0))

    out_line = file_line + self.unit_length
    try:
        # Load the geometry data.
        for i in range(file_line, out_line):
            # Put the values into the respective data objects.
            # This is done based on the column widths set in the Dat file
            # for the spill section.
            self.row_collection.addValue(rdt.CHAINAGE, unit_data[i][0:10].strip())
            self.row_collection.addValue(rdt.ELEVATION, unit_data[i][10:20].strip())

            # In some edge cases there are no values set in the file for the
            # easting and northing, so use defaults.
            if not len(unit_data[i]) > 21:
                self.row_collection.addValue(rdt.EASTING)
                self.row_collection.addValue(rdt.NORTHING)
            else:
                self.row_collection.addValue(rdt.EASTING, unit_data[i][20:30].strip())
                self.row_collection.addValue(rdt.NORTHING, unit_data[i][30:40].strip())
    except NotImplementedError:
        # Bug fix: logging.Logger has no ERROR() method; the original
        # logger.ERROR(...) raised AttributeError and masked the real error.
        logger.error('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')
        raise

    return out_line
def test_negativeChainageCheck_method(self): '''Tests the negative chainage check method. @note: The method doesnot need to check for any index issues because that is done in the calling method. ''' # Create RiverUnit object and give it a chainage object with some data river = riverunit.RiverUnit(1, 1) chainage = do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3) chainage.data_collection = self.chainage chainage.record_length = 18 river.row_collection = RowDataCollection() river.row_collection._collection.append(chainage) # check that we catch a negative chainage increase - > value to the right self.assertFalse(river._checkChainageIncreaseNotNegative(6, 10.42), 'Catch negative chainage increase fail (1)') # check that we catch a negative chainage increase - < value to the left self.assertFalse(river._checkChainageIncreaseNotNegative(5, 8.4), 'Catch negative chainage increase fail (2)') # Check that we don't stop a non-negative chainage increase. self.assertTrue(river._checkChainageIncreaseNotNegative(7, 10.42), 'Let non-negative increase through fail') # check that we can insert at the end. self.assertTrue(river._checkChainageIncreaseNotNegative(17, 19.418), 'Let non-negative increase at end of list through fail')
def readTmfFile(datafile):
    """Loads the contents of the Materials CSV file referenced by datafile.

    Loads the data from the file referenced by the given TuflowFile object
    into a :class:'rowdatacollection' and a list of comment only lines.

    Args:
        datafile(TuflowFile): TuflowFile object with file details.

    Return:
        tuple: rowdatacollection, comment_lines(list).

    See Also:
        :class:'rowdatacollection'.
    """
    value_separator = ','
    comment_types = ['#', '!']
    tmf_enum = dataobj.TmfEnum()

    path = datafile.getAbsolutePath()
    # Column indices 0-10; used at the end to reset the has_changed flags.
    value_order = range(11)

    # Column 0 is an int and takes no leading comma; columns 1-10 are
    # comma-prefixed floats.
    row_collection = RowDataCollection()
    row_collection.initCollection(do.IntData(0, 0, format_str=None, default=''))
    for i in range(1, 11):
        row_collection.initCollection(
            do.FloatData(i, i, format_str=', {0}', default='', no_of_dps=3))

    # Keep track of any comment lines and the row numbers as well.
    row_collection.initCollection(
        do.StringData(11, 'comment', format_str=' ! {0}', default=''))
    row_collection.initCollection(
        do.IntData(12, 'row_no', format_str=None, default=''))

    contents = []
    logger.info('Loading data file contents from disc - %s' % (path))
    contents = _loadFileFromDisc(path)

    # Stores the comments found in the file.
    comment_lines = []

    # Loop through the contents list loaded from file line-by-line.
    # comment_lines gets one entry per file line: the comment text for
    # comment/blank lines, None for data lines (a parallel index).
    first_data_line = False
    row_count = 0
    for i, line in enumerate(contents, 0):
        comment = hasCommentOnlyLine(line, comment_types)
        # '' is falsy, so blank lines need the explicit == '' check to be
        # recorded as comments too.
        if comment or comment == '':
            comment_lines.append(comment)

        # If we have a line that isn't a comment or a blank then it is going
        # to contain materials entries.
        else:
            comment_lines.append(None)
            row_collection = _loadRowData(line, row_count, row_collection,
                                          tmf_enum.ITERABLE, comment_types,
                                          value_separator)
            row_count += 1

    # Just need to reset the has_changed variable because it will have been
    # set to True while loading everything in.
    for i in range(0, len(value_order)):
        row_collection.getDataObject(value_order[i]).has_changed = False

    return row_collection, comment_lines
def readMatSubfile(main_datafile, filename, header_list):
    """Load a Materials subfile referenced from the main materials file.

    Args:
        main_datafile: object with a 'root' attribute giving the directory
            that filename is relative to.
        filename(str): name of the subfile to load.
        header_list(list): up to two column-header names to locate in the
            subfile's header row.

    Return:
        DataFileSubfileMat: wrapping the loaded row data, comment lines and
        the locations of the two requested headers.

    Raises:
        IOError: if the file cannot be opened.
    """
    value_separator = ','
    comment_types = ['#', '!']
    mat_subfile_enum = dataobj.SubfileMatEnum()
    path = os.path.join(main_datafile.root, filename)
    root = main_datafile.root

    # Headers default to the string 'None' (not the None object) so the
    # equality tests in _loadHeadData behave consistently.
    header1 = 'None'
    header2 = 'None'
    if len(header_list) > 0:
        header1 = header_list[0]
    if len(header_list) > 1:
        header2 = header_list[1]

    def _scanfile(filepath):
        """Scans the file before we do any loading to identify the contents.

        Need to do this because the file can be setup in so many way that it
        becomes a headache to work it out in advance. Better to take a little
        bit of extra processing time and do some quick checks first.

        Arguments:
            file_path (str): the path to the subfile.

        Return:
            tuple:
                list: booleans with whether the column contains data that
                    we want or not.
                int: length of the cols list.
                list: containing all of the first row column data.
                int: first row with usable data on.
        """
        logger.debug('Scanning Materials file - %s' % (filepath))

        # NOTE(review): 'rb' + csv.reader is Python 2 usage; under Python 3
        # csv needs a text-mode file — confirm target interpreter.
        with open(filepath, 'rb') as csv_file:
            csv_file = csv.reader(csv_file)

            cols = []
            head_list = []
            start_row = -1
            for i, row in enumerate(csv_file, 0):
                # Stop scanning at the first fully blank row.
                if "".join(row).strip() == "":
                    break
                for j, col in enumerate(row, 0):
                    if i == 0:
                        # First row: assume headers; create a False flag
                        # per column and remember the header values.
                        cols.append(False)
                        head_list = row
                    elif uuf.isNumeric(col):
                        # Numeric value marks the column as wanted data and
                        # records the first row holding usable data.
                        cols[j] = True
                        if start_row == -1:
                            start_row = i
                    elif cols[j] == True:
                        # Column previously numeric but now isn't: stop
                        # examining the rest of this row.
                        break

        return cols, len(cols), head_list, start_row

    def _loadHeadData(row, row_collection, col_length):
        """Store the header row and locate the two requested headers.

        Return:
            tuple: (row_collection, head1_location, head2_location) where
            the locations are the column indices of header1/header2 or -1
            if not present.
        """
        new_row = [None] * 12

        # NOTE(review): results are unused here — presumably retained for a
        # side effect or leftover from an earlier version; verify.
        comment_indices, length = uuf.findSubstringInList('!', row)
        # Header row occupies a slot in the parallel comment_lines list
        # (closure from the enclosing function).
        comment_lines.append(None)

        head1_location = -1
        head2_location = -1
        row_length = len(row)
        for i in range(0, col_length):
            if i < row_length:
                entry = row[i].strip()
                if entry == header1:
                    head1_location = i
                if entry == header2:
                    head2_location = i
                row_collection.addValue('actual_header', entry)

        return row_collection, head1_location, head2_location

    def _loadRowData(row, row_count, row_collection, comment_lines, col_length,
                     start_row):
        """Loads the data in a specific row of the file.

        Args:
            row(list): containing the row data.
            row_count(int): the current row number.
            col_length(int): number of columns found by _scanfile.
            start_row(int): first row containing usable data.

        Return:
            tuple: (row_collection, comment_lines) updated with this row.
        """
        # Any lines that aren't headers, but are above the first row to
        # contain actual data will be stored as comment lines.
        if row_count < start_row:
            comment_lines.append(row)
            return row_collection, comment_lines
        else:
            comment_lines.append(None)

        # A trailing '!' or '#' cell is an inline comment.
        if '!' in row[-1] or '#' in row[-1]:
            row_collection.addValue('comment', row[-1])

        # Add the row data in the order that it appears in the file
        # from left to right.
        for i in range(col_length):
            if i < len(row):
                row_collection.addValue(i, row[i])

        return row_collection, comment_lines

    try:
        logger.info('Loading data file contents from disc - %s' % (path))
        with open(path, 'rb') as csv_file:
            csv_file = csv.reader(csv_file)

            # Do a quick check of the file setup.
            cols, col_length, head_list, start_row = _scanfile(path)

            # First entry doesn't want to have a comma in front when
            # formatting, but all of the others do. Numeric columns get
            # FloatData, the rest StringData.
            row_collection = RowDataCollection()
            row_collection.initCollection(
                do.FloatData(0, 0, format_str=' {0}', default='', no_of_dps=6))
            for i in range(1, len(cols)):
                if cols[i] == True:
                    row_collection.initCollection(
                        do.FloatData(i, i, format_str=', {0}', default='',
                                     no_of_dps=6))
                else:
                    row_collection.initCollection(
                        do.StringData(i, i, format_str=', {0}', default=''))

            row_collection.initCollection(
                do.StringData(0, 'actual_header', format_str='{0}', default=''))
            row_collection.initCollection(
                do.IntData(15, 'row_no', format_str=None, default=''))

            # Stores the comments found in the file.
            comment_lines = []
            first_data_line = False

            # Loop through the contents list loaded from file line-by-line.
            for i, line in enumerate(csv_file, 0):
                comment = hasCommentOnlyLine(''.join(line), comment_types)
                # '' is falsy, so blank lines need the explicit check.
                if comment or comment == '':
                    comment_lines.append([comment, i])

                # If we have a line that isn't a comment or a blank then it
                # is going to contain materials entries.
                else:
                    # First non-comment is the headers.
                    if first_data_line == False:
                        first_data_line = True
                        row_collection, head1_loc, head2_loc = _loadHeadData(
                            line, row_collection, col_length)
                    else:
                        row_collection, comment_lines = _loadRowData(
                            line, i, row_collection, comment_lines,
                            col_length, start_row)

                    row_collection.addValue('row_no', i)

    except IOError:
        logger.warning('Cannot load file - IOError')
        raise IOError('Cannot load file at: ' + path)

    # NOTE(review): head1_loc/head2_loc are only bound if at least one
    # non-comment line existed — an all-comment file would raise NameError
    # here; confirm inputs always contain a header row.
    path_holder = filetools.PathHolder(path, root)
    mat_sub = dataobj.DataFileSubfileMat(path_holder, row_collection,
                                         comment_lines,
                                         path_holder.file_name,
                                         head1_loc, head2_loc)
    return mat_sub
def readMatCsvFile(datafile):
    """Loads the contents of the Materials CSV file referenced by datafile.

    Loads the data from the file referenced by the given TuflowFile object
    into a :class:'rowdatacollection' and a list of comment only lines.

    Args:
        datafile(TuflowFile): TuflowFile object with file details.

    Return:
        tuple: rowdatacollection, comment_lines(list), subfile_details(dict)
            mapping any referenced subfile names to their header lists.

    See Also:
        :class:'rowdatacollection'.
    """
    value_seperator = ','
    comment_types = ['#', '!']
    csv_enum = dataobj.MatCsvEnum()
    # Filled by _disectEntry when a Manning's column references a subfile.
    subfile_details = {}

    def _loadHeadData(row, row_collection):
        """Map the 4 raw header cells onto the 12-slot internal layout and
        store them as 'actual_header' values.
        """
        new_row = [None] * 12

        # A trailing '!' or '#' cell is an inline comment.
        if '!' in row[-1] or '#' in row[-1]:
            row_collection.addValue('comment', row[-1])

        # Raw columns 0-3 map to internal slots 0, 1, 9 and 11.
        new_row[0] = row[0]
        new_row[1] = row[1]
        new_row[9] = row[2]
        new_row[11] = row[3]

        row_length = len(new_row)
        for i, v in enumerate(new_row):
            if i < row_length:
                row_collection.addValue('actual_header', new_row[i])

        return row_collection

    def _disectEntry(col_no, entry, new_row):
        """Breaks the row values into the appropriate object values.

        The materials file can have Excel style sub-values. i.e. it can have
        seperate columns defined within a bigger one. This function will
        break those values down into a format usable by the values initiated
        in the rowdatacollection.

        Args:
            col_no(int): the current column number.
            entry(string): the value of the current column.
            new_row(list): the row values to update.

        Return:
            list containing the updated row values.

        Note:
            This isn't very nice. Need to clean it up and find a better,
            safer way of dealing with breaking the row data up. It may be
            excess work but perhaps creating an xml converter could work
            quite will and make dealing with the file a bit easier?
        """
        made_change = False

        # Put in ID and Hazard as normal.
        if col_no == 0:
            new_row[0] = entry
        elif col_no == 11:
            new_row[11] = entry
        # Possible break up Manning's entry further.
        elif col_no == 1:
            # See if there's more than one value in the Manning's category.
            splitval = entry.split(',')

            # If there is and it's numeric then it's a single value for 'n'.
            if len(splitval) == 1:
                if uuf.isNumeric(splitval[0]):
                    new_row[1] = splitval[0]

                # Otherwise it's a filename. These can be further separated
                # into two column headers to read from the sub files.
                else:
                    strsplit = splitval[0].split('|')
                    if len(strsplit) == 1:
                        # Filename only: no headers recorded.
                        subfile_details[strsplit[0].strip()] = []
                        new_row[6] = strsplit[0].strip()
                    elif len(strsplit) == 2:
                        # Filename plus one header.
                        subfile_details[strsplit[0]] = [strsplit[1].strip()]
                        new_row[6] = strsplit[0].strip()
                        new_row[7] = strsplit[1].strip()
                    else:
                        # Filename plus two headers.
                        subfile_details[strsplit[0]] = [strsplit[1].strip(),
                                                        strsplit[2].strip()]
                        new_row[6] = strsplit[0].strip()
                        new_row[7] = strsplit[1].strip()
                        new_row[8] = strsplit[2].strip()

            # If there's more than one value then it must be the Manning's
            # depth curve values (N1, Y1, N2, Y2).
            else:
                new_row[2] = splitval[0]
                new_row[3] = splitval[1]
                new_row[4] = splitval[2]
                new_row[5] = splitval[3]

        # Finally grab the infiltration parameters (IL, CL).
        elif col_no == 2:
            splitval = entry.split(',')
            new_row[9] = splitval[0]
            new_row[10] = splitval[1]

        return new_row

    def _loadRowData(row, row_count, row_collection):
        """Loads the data in a specific row of the file.

        Mutates row_collection in place (no return value).

        Args:
            row(list): containing the row data.
            row_count(int): the current row number.
        """
        # A trailing '!' or '#' cell is an inline comment.
        if '!' in row[-1] or '#' in row[-1]:
            row_collection.addValue('comment', row[-1])
        new_row = [None] * 12

        # Add the row data in the order that it appears in the file
        # from left to right.
        for i in csv_enum.ITERABLE:
            if i < len(row):
                new_row = _disectEntry(i, row[i], new_row)

        for val, item in enumerate(new_row):
            row_collection.addValue(val, item)

    # First entry doesn't want to have a comma in front when formatting.
    row_collection = RowDataCollection()
    # Flags for columns 1-11: 1 == float column, 0 == string column.
    types = [1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0]

    # Do the first entry separately because it has a different format string.
    row_collection.initCollection(
        do.StringData(0, 0, format_str='{0}', default=''))
    for i, t in enumerate(types, 1):
        if t == 0:
            row_collection.initCollection(
                do.StringData(i, i, format_str=', {0}', default=''))
        else:
            row_collection.initCollection(
                do.FloatData(i, i, format_str=', {0}', default='',
                             no_of_dps=3))

    # Add a couple of extra rows to the row_collection for tracking the
    # data in the file.
    row_collection.initCollection(
        do.StringData(12, 'comment', format_str='{0}', default=''))
    row_collection.initCollection(
        do.StringData(13, 'actual_header', format_str='{0}', default=''))
    row_collection.initCollection(
        do.IntData(15, 'row_no', format_str=None, default=''))

    path = datafile.getAbsolutePath()
    try:
        logger.info('Loading data file contents from disc - %s' % (path))
        # NOTE(review): 'rb' + csv.reader is Python 2 usage; under Python 3
        # csv needs a text-mode file — confirm target interpreter.
        with open(path, 'rb') as csv_file:
            csv_file = csv.reader(csv_file)

            # Stores the comments found in the file.
            comment_lines = []
            first_data_line = False
            line_count = 0

            try:
                # Loop through the contents list loaded from file
                # line-by-line.
                for i, line in enumerate(csv_file, 0):
                    comment = hasCommentOnlyLine(''.join(line), comment_types)
                    # '' is falsy, so blank lines need the explicit check.
                    if comment or comment == '':
                        comment_lines.append(comment)

                    # If we have a line that isn't a comment or a blank then
                    # it is going to contain materials entries.
                    else:
                        # First non-comment is the headers.
                        if first_data_line == False:
                            first_data_line = True
                            _loadHeadData(line, row_collection)
                        else:
                            _loadRowData(line, i, row_collection)

                        row_collection.addValue('row_no', line_count)
                        line_count += 1
                        comment_lines.append(None)

            except IndexError:
                logger.error(
                    'This file is not setup/formatted correctly for a Materials.CSV file:\n'
                    + path)
                raise IndexError(
                    'File is not correctly formatted for a Materials.csv file')
            except AttributeError:
                logger.error(
                    'This file is not setup/formatted correctly for a Materials.CSV file:\n'
                    + path)
                raise AttributeError(
                    'File is not correctly formatted for a Materials.csv file')

    except IOError:
        logger.warning('Cannot load file - IOError')
        raise IOError('Cannot load file at: ' + path)

    # Just need to reset the has_changed variable because it will have been
    # set to True while loading everything in.
    for i in range(0, len(csv_enum.ITERABLE)):
        row_collection.getDataObject(i).has_changed = False

    return row_collection, comment_lines, subfile_details
def readBcFile(datafile):
    """Loads the contents of the BC Database file referenced by datafile.

    Loads the data from the file referenced by the given TuflowFile object
    into a :class:'rowdatacollection' and a list of comment only lines.

    Args:
        datafile(TuflowFile): TuflowFile object with file details.

    Return:
        tuple: rowdatacollection, comment_lines(list).

    Raises:
        IOError: if the file cannot be opened.

    See Also:
        :class:'rowdatacollection'.
    """
    value_seperator = ','
    comment_types = ['#', '!']
    bc_enum = dataobj.BcEnum()

    def _checkHeaders(row, required_headers):
        """Checks that any required headers can be found.

        Reviews the headers in the header row of the csv file to ensure that
        any specifically needed named column headers exist.

        Args:
            row(list): columns headers.
            required_headers(list): column names that must be included.

        Return:
            bool: False if any required header is missing, True otherwise.
        """
        # Check what we have in the header row.
        head_check = True
        for r in required_headers:
            if not r in row:
                head_check = False
                if not head_check:
                    # Bug fix: the message was built as 'not' + 'found',
                    # producing "notfound in file"; a space is needed.
                    logger.warning('Required header (' + r + ') not ' +
                                   'found in file: ' + path)
        return head_check

    def _loadHeadData(row, row_collection, required_headers):
        """Loads the column header data.

        Adds the file defined names for the headers to the rowdatacollection.

        Args:
            row(list): containing the row data.
            row_collection(rowdatacollection): for updating.
            required_headers(list): column names that must exist.

        Return:
            rowdatacollection: updated with header row details.
        """
        row_length = len(row)
        head_check = _checkHeaders(row, required_headers)
        for i, v in enumerate(bc_enum.ITERABLE):
            if i < row_length:
                row_collection.addValue('actual_header', row[i])

        return row_collection

    def _loadRowData(row, row_count, row_collection):
        """Loads the data in a specific row of the file.

        Args:
            row(list): containing the row data.
            row_count(int): the current row number.
            row_collection(rowdatacollection): for updating.

        Return:
            rowdatacollection: updated with this row's details.
        """
        # A trailing '!' or '#' cell is an inline comment.
        if '!' in row[-1] or '#' in row[-1]:
            row_collection.addValue('comment', row[-1])

        # Add the row data in the order that it appears in the file
        # from left to right.
        for i in bc_enum.ITERABLE:
            if i < len(row):
                row_collection.addValue(i, row[i])

        return row_collection

    # Initialise the RowDataOjectCollection object with currect setup.
    # Column 0 takes no leading comma; all others are comma-prefixed.
    row_collection = RowDataCollection()
    for i, val in enumerate(bc_enum.ITERABLE):
        if i == 0:
            row_collection.initCollection(
                do.StringData(i, i, format_str='{0}', default=''))
        else:
            row_collection.initCollection(
                do.StringData(i, i, format_str=', {0}', default=''))

    row_collection.initCollection(
        do.StringData(0, 'actual_header', format_str=', {0}', default=''))
    row_collection.initCollection(
        do.IntData(15, 'row_no', format_str=None, default=''))

    path = datafile.getAbsolutePath()
    required_headers = ['Name', 'Source']

    try:
        logger.info('Loading data file contents from disc - %s' % (path))
        with open(path, 'rb') as csv_file:
            csv_file = csv.reader(csv_file)

            # Stores the comments found in the file.
            comment_lines = []
            first_data_line = False
            row_count = 0

            # Loop through the contents list loaded from file line-by-line.
            for i, line in enumerate(csv_file, 0):
                comment = hasCommentOnlyLine(''.join(line), comment_types)
                # '' is falsy, so blank lines need the explicit check.
                if comment or comment == '':
                    comment_lines.append(comment)

                # If we have a line that isn't a comment or a blank then it
                # is going to contain materials entries.
                else:
                    # First non-comment is the headers.
                    if first_data_line == False:
                        first_data_line = True
                        row_collection = _loadHeadData(line, row_collection,
                                                       required_headers)
                    else:
                        row_collection = _loadRowData(line, i, row_collection)

                    row_collection.addValue('row_no', row_count)
                    row_count += 1
                    comment_lines.append(None)

    except IOError:
        logger.warning('Cannot load file - IOError')
        raise IOError('Cannot load file at: ' + path)

    # Just need to reset the has_changed variable because it will have been
    # set to True while loading everything in.
    for i in range(0, len(bc_enum.ITERABLE)):
        row_collection.getDataObject(i).has_changed = False

    return row_collection, comment_lines
def _readOrificeRowData(self, unit_data, file_line):
    """Load the data defining the orifice openings in the bridge.

    Args:
        unit_data (list): the data pertaining to this unit.
        file_line (int): index of the first orifice row in unit_data.

    Return:
        int: index of the line directly after the orifice rows.

    TODO:
        These errors are cryptic here as they're very specific to the
        RowDataCollections being accessed. Perhaps these should be made a
        little more relevant by raising a different error. Or they could be
        dealt with better here.
    """
    # Add the new row data types to the object collection.
    # All of them must have type, output format, default value and position
    # in the row as the first variables in vars.
    # The others are DataType specific.
    self.additional_row_collections['Orifice'] = RowDataCollection()
    self.additional_row_collections['Orifice'].initCollection(
        do.FloatData(0, rdt.CULVERT_INVERT, format_str='{:>10}', no_of_dps=3))
    self.additional_row_collections['Orifice'].initCollection(
        do.FloatData(1, rdt.CULVERT_SOFFIT, format_str='{:>10}', no_of_dps=3))
    self.additional_row_collections['Orifice'].initCollection(
        do.FloatData(2, rdt.CULVERT_AREA, format_str='{:>10}',
                     no_of_dps=3, default=0.0))
    self.additional_row_collections['Orifice'].initCollection(
        do.FloatData(3, rdt.CULVERT_CD_PART, format_str='{:>10}',
                     no_of_dps=3, default=0.0))
    self.additional_row_collections['Orifice'].initCollection(
        do.FloatData(4, rdt.CULVERT_CD_FULL, format_str='{:>10}',
                     no_of_dps=3, default=0.0))
    self.additional_row_collections['Orifice'].initCollection(
        do.FloatData(5, rdt.CULVERT_DROWNING, format_str='{:>10}',
                     no_of_dps=3, default=0.0))

    out_line = file_line + self.no_of_culvert_rows
    try:
        # Load the geometry data from the fixed-width columns defined by
        # the Dat file format.
        # Bug fix: these values were previously added to the 'Opening'
        # collection; they belong in the 'Orifice' collection created above.
        for i in range(file_line, out_line):
            self.additional_row_collections['Orifice'].addValue(
                rdt.CULVERT_INVERT, unit_data[i][0:10].strip())
            self.additional_row_collections['Orifice'].addValue(
                rdt.CULVERT_SOFFIT, unit_data[i][10:20].strip())
            self.additional_row_collections['Orifice'].addValue(
                rdt.CULVERT_AREA, unit_data[i][20:30].strip())
            self.additional_row_collections['Orifice'].addValue(
                rdt.CULVERT_CD_PART, unit_data[i][30:40].strip())
            self.additional_row_collections['Orifice'].addValue(
                rdt.CULVERT_CD_FULL, unit_data[i][40:50].strip())
            self.additional_row_collections['Orifice'].addValue(
                rdt.CULVERT_DROWNING, unit_data[i][50:60].strip())
    except NotImplementedError:
        logger.error(
            'Unable to read Unit Data(dataRowObject creation) - NotImplementedError'
        )
        raise

    self.unit_length += self.no_of_culvert_rows
    return out_line
def _readMainRowData(self, unit_data, file_line): """Reads the units rows into the row collection. This is all the geometry data that occurs after the no of rows variable in the Bridge Units of the dat file. Args: unit_data (list): the data pertaining to this unit. """ # Add the new row data types to the object collection # All of them must have type, output format, default value and position # in the row as the first variables in vars. # The others are DataType specific. self.row_collection = RowDataCollection() self.row_collection.initCollection( do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3)) self.row_collection.initCollection( do.FloatData(1, rdt.ELEVATION, format_str='{:>10}', no_of_dps=3)) self.row_collection.initCollection( do.FloatData(2, rdt.ROUGHNESS, format_str='{:>10}', no_of_dps=3, default=0.0)) self.row_collection.initCollection( do.ConstantData(3, rdt.EMBANKMENT, ('L', 'R'), format_str='{:>11}', default='')) self.unit_length = 6 out_line = file_line + self.no_of_chainage_rows try: # Load the geometry data for i in range(file_line, out_line): # Put the values into the respective data objects # This is done based on the column widths set in the Dat file # for the river section. self.row_collection.addValue(rdt.CHAINAGE, unit_data[i][0:10].strip()) self.row_collection.addValue(rdt.ELEVATION, unit_data[i][10:20].strip()) self.row_collection.addValue(rdt.ROUGHNESS, unit_data[i][20:30].strip()) # Might not exist try: bank = unit_data[i][40:51].strip() except: bank = '' self.row_collection.addValue(rdt.EMBANKMENT, bank) except NotImplementedError: logger.error( 'Unable to read Unit Data(dataRowObject creation) - NotImplementedError' ) raise self.no_of_opening_rows = int(unit_data[out_line].strip()) self.unit_length += self.no_of_chainage_rows + 1 return out_line + 1
def _readRowData(self, unit_data, file_line):
    """Reads the units rows into the row collection.

    This is all the geometry data that occurs after the no of rows variable
    in the River Units of the dat file.

    Args:
        unit_data (list): the data pertaining to this unit.
        file_line (int): index of the first geometry row in unit_data.

    Return:
        int: index of the line directly after the geometry rows.
    """
    # Add the new row data types to the object collection.
    # All of them must have type, output format, default value and position
    # in the row as the first variables in vars.
    # The others are DataType specific.
    self.row_collection = RowDataCollection()
    self.row_collection.initCollection(
        do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3))
    self.row_collection.initCollection(
        do.FloatData(1, rdt.ELEVATION, format_str='{:>10}', no_of_dps=3))
    self.row_collection.initCollection(
        do.FloatData(2, rdt.ROUGHNESS, format_str='{:>10}', default=0.0,
                     no_of_dps=3))
    # Panel marker is a '*' symbol flag.
    self.row_collection.initCollection(
        do.SymbolData(3, rdt.PANEL_MARKER, '*', format_str='{:<5}',
                      default=False))
    self.row_collection.initCollection(
        do.FloatData(4, rdt.RPL, format_str='{:>5}', default=1.000,
                     no_of_dps=3))
    self.row_collection.initCollection(
        do.ConstantData(5, rdt.BANKMARKER, ('LEFT', 'RIGHT', 'BED'),
                        format_str='{:<10}', default=''))
    self.row_collection.initCollection(
        do.FloatData(6, rdt.EASTING, format_str='{:>10}', default=0.0,
                     no_of_dps=2))
    self.row_collection.initCollection(
        do.FloatData(7, rdt.NORTHING, format_str='{:>10}', default=0.0,
                     no_of_dps=2))
    self.row_collection.initCollection(
        do.ConstantData(8, rdt.DEACTIVATION, ('LEFT', 'RIGHT'),
                        format_str='{:<10}', default=''))
    # Default == '~' means to ignore formatting and apply '' when value
    # is None.
    self.row_collection.initCollection(
        do.StringData(9, rdt.SPECIAL, format_str='{:<10}', default='~'))

    out_line = file_line + self.unit_length
    try:
        # Load the geometry data.
        for i in range(file_line, out_line):
            # Put the values into the respective data objects.
            # This is done based on the column widths set in the Dat file
            # for the river section.
            self.row_collection.addValue(rdt.CHAINAGE, unit_data[i][0:10].strip())
            self.row_collection.addValue(rdt.ELEVATION, unit_data[i][10:20].strip())
            self.row_collection.addValue(rdt.ROUGHNESS, unit_data[i][20:30].strip())
            self.row_collection.addValue(rdt.PANEL_MARKER, unit_data[i][30:35].strip())
            self.row_collection.addValue(rdt.RPL, unit_data[i][35:40].strip())
            self.row_collection.addValue(rdt.BANKMARKER, unit_data[i][40:50].strip())
            self.row_collection.addValue(rdt.EASTING, unit_data[i][50:60].strip())
            self.row_collection.addValue(rdt.NORTHING, unit_data[i][60:70].strip())
            self.row_collection.addValue(rdt.DEACTIVATION, unit_data[i][70:80].strip())
            self.row_collection.addValue(rdt.SPECIAL, unit_data[i][80:90].strip())
    except NotImplementedError:
        # Bug fix: logging.Logger has no ERROR() method; the original
        # logger.ERROR(...) raised AttributeError and masked the real error.
        logger.error(
            'Unable to read Unit Data(dataRowObject creation) - NotImplementedError'
        )
        raise

    return out_line
def test_addDataRow_method(self):
    """Test adding a new row to the river section.

    Builds a fully-populated 18-row RowDataCollection by hand, inserts a
    new row at index 5 via RiverUnit.addDataRow(), then checks both the
    inserted values (defaults filled in for unsupplied columns) and the
    full per-column lists afterwards. Also checks the illegal-input paths.
    """
    river = riverunit.RiverUnit(1, 1)
    river.head_data = self.header_vars
    river.unit_length = 18
    # Fix: removed the bare `river.row_collection` expression statement
    # here — it only read the attribute and discarded the result (dead code).

    # Create some data objects
    objs = []
    objs.append(do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3))
    objs.append(do.FloatData(1, rdt.ELEVATION, format_str='{:>10}', no_of_dps=3))
    objs.append(do.FloatData(2, rdt.ROUGHNESS, format_str='{:>10}', default=0.0,
                             no_of_dps=3))
    objs.append(do.SymbolData(3, rdt.PANEL_MARKER, '*', format_str='{:<5}',
                              default=False))
    objs.append(do.FloatData(4, rdt.RPL, format_str='{:>5}', default=1.000,
                             no_of_dps=3))
    objs.append(do.ConstantData(5, rdt.BANKMARKER, ('LEFT', 'RIGHT', 'BED'),
                                format_str='{:<10}', default=''))
    objs.append(do.FloatData(6, rdt.EASTING, format_str='{:>10}', default=0.0,
                             no_of_dps=2))
    objs.append(do.FloatData(7, rdt.NORTHING, format_str='{:>10}', default=0.0,
                             no_of_dps=2))
    objs.append(do.ConstantData(8, rdt.DEACTIVATION, ('LEFT', 'RIGHT'),
                                format_str='{:<10}', default=''))
    objs.append(do.StringData(9, rdt.SPECIAL, format_str='{:<10}', default='~'))

    # Populate the data
    objs[0].data_collection = [5.996, 6.936, 7.446, 7.635, 8.561, 9.551,
                               10.323, 10.904, 12.542, 13.74, 13.788, 13.944,
                               15.008, 16.355, 17.424, 18.449, 19.416, 19.420]
    objs[1].data_collection = [37.56, 37.197, 36.726, 35.235, 35.196, 35.19,
                               35.229, 35.319, 35.637, 35.593, 35.592, 36.148,
                               36.559, 37.542, 38.518, 39.037, 39.146, 39.133]
    objs[2].data_collection = [0.08, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035,
                               0.035, 0.035, 0.035, 0.035, 0.035, 0.08, 0.08,
                               0.08, 0.08, 0.08, 0.08]
    objs[3].data_collection = [False, True, False, False, False, False, False,
                               False, False, False, False, False, True, False,
                               False, False, False, False]
    objs[4].data_collection = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                               1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
    objs[5].data_collection = ['LEFT', False, False, False, False, 'BED',
                               False, False, False, False, False, False,
                               False, False, False, False, False, 'RIGHT']
    objs[6].data_collection = [291391.67, 291391.43, 291391.3, 291391.25,
                               291391.01, 291390.75, 291390.55, 291390.4,
                               291389.98, 291389.67, 291389.66, 291389.62,
                               291389.34, 291389.0, 291388.72, 291388.46,
                               291388.21, 291388.21]
    objs[7].data_collection = [86582.61, 86581.7, 86581.21, 86581.03, 86580.13,
                               86579.18, 86578.43, 86577.87, 86576.29,
                               86575.13, 86575.09, 86574.93, 86573.91, 86572.6,
                               86571.57, 86570.58, 86569.65, 86569.65]
    objs[8].data_collection = ['LEFT', False, False, False, False, False,
                               False, False, False, False, False, False,
                               False, False, False, False, False, 'RIGHT']
    objs[9].data_collection = ['16', '', '', '', '', '', '', '', '', '', '',
                               '', '', '', '', '', '', '4095']

    # Add the data object to the row data collection
    col = RowDataCollection()
    for o in objs:
        o.record_length = 18
        col._collection.append(o)
    river.row_collection = col

    # Add a new row
    river.addDataRow(row_vals={rdt.CHAINAGE: 9.42, rdt.ELEVATION: 35.2,
                               rdt.ROUGHNESS: 0.035, rdt.SPECIAL: '1264'},
                     index=5)

    # Make sure that we get back the same values as we set.
    self.assertEqual(9.42, river.row_collection._collection[rdt.CHAINAGE].data_collection[5], 'Add new row - get chainage value failed')
    self.assertEqual(35.2, river.row_collection._collection[rdt.ELEVATION].data_collection[5], 'Add new row - get elevation value failed')
    self.assertEqual(0.035, river.row_collection._collection[rdt.ROUGHNESS].data_collection[5], 'Add new row - get roughness value failed')
    self.assertEqual(False, river.row_collection._collection[rdt.PANEL_MARKER].data_collection[5], 'Add new row - get panelmarker value failed')
    self.assertEqual(1.000, river.row_collection._collection[rdt.RPL].data_collection[5], 'Add new row - get rpl value failed')
    self.assertEqual(False, river.row_collection._collection[rdt.BANKMARKER].data_collection[5], 'Add new row - get bankmarker value failed')
    self.assertEqual(0.00, river.row_collection._collection[rdt.EASTING].data_collection[5], 'Add new row - get easting value failed')
    self.assertEqual(0.00, river.row_collection._collection[rdt.NORTHING].data_collection[5], 'Add new row - get northing value failed')
    self.assertEqual(False, river.row_collection._collection[rdt.DEACTIVATION].data_collection[5], 'Add new row - get deactivation value failed')
    self.assertEqual('1264', river.row_collection._collection[rdt.SPECIAL].data_collection[5], 'Add new row - get special value failed')

    # This is how we expect the data to look when we get back out of the
    # addRow() method
    new_chainage = [5.996, 6.936, 7.446, 7.635, 8.561, 9.42, 9.551, 10.323,
                    10.904, 12.542, 13.74, 13.788, 13.944, 15.008, 16.355,
                    17.424, 18.449, 19.416, 19.42]
    new_elevation = [37.56, 37.197, 36.726, 35.235, 35.196, 35.2, 35.19,
                     35.229, 35.319, 35.637, 35.593, 35.592, 36.148, 36.559,
                     37.542, 38.518, 39.037, 39.146, 39.133]
    new_roughness = [0.08, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035,
                     0.035, 0.035, 0.035, 0.035, 0.035, 0.08, 0.08, 0.08,
                     0.08, 0.08, 0.08]
    new_panelmarker = [False, True, False, False, False, False, False, False,
                       False, False, False, False, False, True, False, False,
                       False, False, False]
    new_rpl = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
               1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
    new_bankmarker = ['LEFT', False, False, False, False, False, 'BED', False,
                      False, False, False, False, False, False, False, False,
                      False, False, 'RIGHT']
    new_easting = [291391.67, 291391.43, 291391.3, 291391.25, 291391.01, 0.00,
                   291390.75, 291390.55, 291390.4, 291389.98, 291389.67,
                   291389.66, 291389.62, 291389.34, 291389.0, 291388.72,
                   291388.46, 291388.21, 291388.21]
    new_northing = [86582.61, 86581.7, 86581.21, 86581.03, 86580.13, 0.00,
                    86579.18, 86578.43, 86577.87, 86576.29, 86575.13,
                    86575.09, 86574.93, 86573.91, 86572.6, 86571.57, 86570.58,
                    86569.65, 86569.65]
    new_deactivation = ['LEFT', False, False, False, False, False, False,
                        False, False, False, False, False, False, False,
                        False, False, False, False, 'RIGHT']
    new_special = ['16', '', '', '', '', '1264', '', '', '', '', '', '', '',
                   '', '', '', '', '', '4095']

    # Make sure that we get back the same values as we set. I.e. the data
    # objects are updated as expected.
    self.assertListEqual(new_chainage, river.row_collection._collection[0].data_collection, 'Chainage list comparison after insertion fail')
    self.assertListEqual(new_elevation, river.row_collection._collection[1].data_collection, 'Elevation list comparison after insertion fail')
    self.assertListEqual(new_roughness, river.row_collection._collection[2].data_collection, 'Roughness list comparison after insertion fail')
    self.assertListEqual(new_panelmarker, river.row_collection._collection[3].data_collection, 'Panelmarker list comparison after insertion fail')
    self.assertListEqual(new_rpl, river.row_collection._collection[4].data_collection, 'Rpl list comparison after insertion fail')
    self.assertListEqual(new_bankmarker, river.row_collection._collection[5].data_collection, 'Bankmarker list comparison after insertion fail')
    self.assertListEqual(new_easting, river.row_collection._collection[6].data_collection, 'Easting list comparison after insertion fail')
    self.assertListEqual(new_northing, river.row_collection._collection[7].data_collection, 'Northing list comparison after insertion fail')
    self.assertListEqual(new_deactivation, river.row_collection._collection[8].data_collection, 'Deactivation list comparison after insertion fail')
    self.assertListEqual(new_special, river.row_collection._collection[9].data_collection, 'Special list comparison after insertion fail')

    # Check that it recognises illegal input values
    self.assertRaises(AttributeError, lambda: river.addDataRow({'trick': 39.1}))

    # Check that it recognises when it will cause a negative chainage increase
    self.assertRaises(ValueError, lambda: river.addDataRow({rdt.CHAINAGE: 10.42, rdt.ELEVATION: 35.3}, 5))
def test_getData_method(self):
    """Test to check the suitability of the getData() method.

    Populates a RiverUnit with known geometry data and compares the
    formatted output of getData() against a hand-built template of the
    expected dat-file lines.
    """
    river = riverunit.RiverUnit(1, 1)
    river.head_data = self.header_vars
    river.unit_length = 18
    # Fix: removed the bare `river.row_collection` expression statement
    # here — it only read the attribute and discarded the result (dead code).

    # Create some data objects
    objs = []
    objs.append(do.FloatData(0, rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3))
    objs.append(do.FloatData(1, rdt.ELEVATION, format_str='{:>10}', no_of_dps=3))
    objs.append(do.FloatData(2, rdt.ROUGHNESS, format_str='{:>10}', default=0.0,
                             no_of_dps=3))
    objs.append(do.SymbolData(3, rdt.PANEL_MARKER, '*', format_str='{:<5}',
                              default=False))
    objs.append(do.FloatData(4, rdt.RPL, format_str='{:>5}', default=1.000,
                             no_of_dps=3))
    objs.append(do.ConstantData(5, rdt.BANKMARKER, ('LEFT', 'RIGHT', 'BED'),
                                format_str='{:<10}', default=''))
    objs.append(do.FloatData(6, rdt.EASTING, format_str='{:>10}', default=0.0,
                             no_of_dps=2))
    objs.append(do.FloatData(7, rdt.NORTHING, format_str='{:>10}', default=0.0,
                             no_of_dps=2))
    objs.append(do.ConstantData(8, rdt.DEACTIVATION, ('LEFT', 'RIGHT'),
                                format_str='{:<10}', default=''))
    objs.append(do.StringData(9, rdt.SPECIAL, format_str='{:<10}', default='~'))

    # Populate the data
    objs[0].data_collection = [5.996, 6.936, 7.446, 7.635, 8.561, 9.551,
                               10.323, 10.904, 12.542, 13.74, 13.788, 13.944,
                               15.008, 16.355, 17.424, 18.449, 19.416, 19.420]
    objs[1].data_collection = [37.56, 37.197, 36.726, 35.235, 35.196, 35.19,
                               35.229, 35.319, 35.637, 35.593, 35.592, 36.148,
                               36.559, 37.542, 38.518, 39.037, 39.146, 39.133]
    objs[2].data_collection = [0.08, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035,
                               0.035, 0.035, 0.035, 0.035, 0.035, 0.08, 0.08,
                               0.08, 0.08, 0.08, 0.08]
    objs[3].data_collection = [False, True, False, False, False, False, False,
                               False, False, False, False, False, True, False,
                               False, False, False, False]
    objs[4].data_collection = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                               1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
    objs[5].data_collection = ['LEFT', False, False, False, False, 'BED',
                               False, False, False, False, False, False,
                               False, False, False, False, False, 'RIGHT']
    objs[6].data_collection = [291391.67, 291391.43, 291391.3, 291391.25,
                               291391.01, 291390.75, 291390.55, 291390.4,
                               291389.98, 291389.67, 291389.66, 291389.62,
                               291389.34, 291389.0, 291388.72, 291388.46,
                               291388.21, 291388.21]
    objs[7].data_collection = [86582.61, 86581.7, 86581.21, 86581.03, 86580.13,
                               86579.18, 86578.43, 86577.87, 86576.29,
                               86575.13, 86575.09, 86574.93, 86573.91, 86572.6,
                               86571.57, 86570.58, 86569.65, 86569.65]
    objs[8].data_collection = ['LEFT', False, False, False, False, False,
                               False, False, False, False, False, False,
                               False, False, False, False, False, 'RIGHT']
    objs[9].data_collection = ['16', '', '', '', '', '', '', '', '', '', '',
                               '', '', '', '', '', '', '4095']

    # Add the data object to the row data collection
    river.row_collection = RowDataCollection()
    for o in objs:
        o.record_length = 18
        river.row_collection._collection.append(o)

    # Setup the list that we expect to be returned from the getData() method
    out_data = \
        ['RIVER (Culvert Exit) CH:7932 - Trimmed to BT',
         'SECTION',
         '1.069',
         ' 15.078 1.111111 1000',
         ' 18',
         ' 5.996 37.560 0.080 1.000LEFT 291391.67 86582.61LEFT 16 ',
         ' 6.936 37.197 0.035* 1.000 291391.43 86581.70 ',
         ' 7.446 36.726 0.035 1.000 291391.30 86581.21 ',
         ' 7.635 35.235 0.035 1.000 291391.25 86581.03 ',
         ' 8.561 35.196 0.035 1.000 291391.01 86580.13 ',
         ' 9.551 35.190 0.035 1.000BED 291390.75 86579.18 ',
         ' 10.323 35.229 0.035 1.000 291390.55 86578.43 ',
         ' 10.904 35.319 0.035 1.000 291390.40 86577.87 ',
         ' 12.542 35.637 0.035 1.000 291389.98 86576.29 ',
         ' 13.740 35.593 0.035 1.000 291389.67 86575.13 ',
         ' 13.788 35.592 0.035 1.000 291389.66 86575.09 ',
         ' 13.944 36.148 0.035 1.000 291389.62 86574.93 ',
         ' 15.008 36.559 0.080* 1.000 291389.34 86573.91 ',
         ' 16.355 37.542 0.080 1.000 291389.00 86572.60 ',
         ' 17.424 38.518 0.080 1.000 291388.72 86571.57 ',
         ' 18.449 39.037 0.080 1.000 291388.46 86570.58 ',
         ' 19.416 39.146 0.080 1.000 291388.21 86569.65 ',
         ' 19.420 39.133 0.080 1.000RIGHT 291388.21 86569.65RIGHT 4095 ']

    # Get the data and check it against our template
    data = river.getData()
    # Fix: assertEquals is a long-deprecated alias of assertEqual and was
    # removed in Python 3.12; use the canonical name.
    self.assertEqual(out_data, data, 'getData() formatting failed')