Example #1
0
    def setupRowCollection():
        """Build the RowDataCollection used to load the file data.

        Returns:
            RowDataCollection: a StringData entry for each text column, a
                FloatData entry (3 dps) for each numeric column, plus a
                trailing IntData used to track the source row number.
        """
        collection = RowDataCollection()

        # Column 0 is added on its own because the first value must not be
        # preceded by a comma when the row is formatted.
        collection.addToCollection(do.StringData(0, format_str='{0}', default=''))

        # Columns 1-8 hold text; columns 9-11 hold floats with 3 dps.
        numeric_columns = (9, 10, 11)
        for col in range(1, 12):
            if col in numeric_columns:
                collection.addToCollection(
                    do.FloatData(col, format_str=', {0}', no_of_dps=3,
                                 default=0.00))
            else:
                collection.addToCollection(
                    do.StringData(col, format_str=', {0}', default=''))

        # Extra entry for tracking the row number of the data in the file.
        collection.addToCollection(do.IntData('row_no'))

        return collection
Example #2
0
    def setupRowData(self):
        """Create the main geometry and opening row data collections.

        All BridgeUnits use these collections. They are built in a separate
        method, called from the constructor, so subclasses can override the
        setup if they need a different layout.
        """
        # Cross-section geometry columns.
        geometry = [
            do.FloatData(rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3,
                         update_callback=self.checkIncreases),
            do.FloatData(rdt.ELEVATION, format_str='{:>10}', no_of_dps=3),
            do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', no_of_dps=3,
                         default=0.039),
            do.ConstantData(rdt.EMBANKMENT, ('', 'L', 'R'),
                            format_str='{:>11}', default=''),
        ]
        self.row_data['main'] = RowDataCollection.bulkInitCollection(geometry)
        self.row_data['main'].setDummyRow(
            {rdt.CHAINAGE: 0, rdt.ELEVATION: 0, rdt.ROUGHNESS: 0})

        # Bridge opening columns; start/end use checkOpening as the
        # update callback.
        openings = [
            do.FloatData(rdt.OPEN_START, format_str='{:>10}', no_of_dps=3,
                         update_callback=self.checkOpening),
            do.FloatData(rdt.OPEN_END, format_str='{:>10}', no_of_dps=3,
                         update_callback=self.checkOpening),
            do.FloatData(rdt.SPRINGING_LEVEL, format_str='{:>10}',
                         no_of_dps=3, default=0.0),
            do.FloatData(rdt.SOFFIT_LEVEL, format_str='{:>10}', no_of_dps=3,
                         default=0.0),
        ]
        self.row_data['opening'] = RowDataCollection.bulkInitCollection(openings)
        self.row_data['opening'].setDummyRow({rdt.OPEN_START: 0, rdt.OPEN_END: 0})
Example #3
0
    def __init__(self, **kwargs):
        """Constructor.

        Args:
            node_count (int): Number of nodes in the model; needed to know
                how many lines to read from the contents list.
            fileOrder (int): Location of the initial conditions in the .DAT
                file. Always at the end, but before the GISINFO section if
                there is one.
        """
        AUnit.__init__(self, **kwargs)
        self._unit_type = InitialConditionsUnit.UNIT_TYPE
        self._unit_category = InitialConditionsUnit.UNIT_CATEGORY
        self._name = "initial_conditions"
        self._name_types = {}
        self._node_count = 0

        # Two leading text columns followed by the numeric state columns;
        # every numeric column is right-aligned to 10 chars with 3 dps.
        dobjs = [
            do.StringData(rdt.LABEL, format_str='{:<12}'),
            do.StringData(rdt.QMARK, format_str='{:>2}', default='y'),
        ]
        float_keys = (rdt.FLOW, rdt.STAGE, rdt.FROUDE_NO, rdt.VELOCITY,
                      rdt.UMODE, rdt.USTATE, rdt.ELEVATION)
        for key in float_keys:
            dobjs.append(
                do.FloatData(key, format_str='{:>10}', default=0.000,
                             no_of_dps=3))
        self.row_data['main'] = RowDataCollection.bulkInitCollection(dobjs)
Example #4
0
 def __init__(self, **kwargs):
     """Constructor.

     Args:
         fileOrder (int): The location of this unit in the file.
     """
     AUnit.__init__(self, **kwargs)

     self._unit_type = HtbdyUnit.UNIT_TYPE
     self._unit_category = HtbdyUnit.UNIT_CATEGORY
     self._name = 'Htbd'

     # Time units accepted by the unit's head data.
     time_units = (
         'SECONDS', 'MINUTES', 'HOURS', 'DAYS', 'WEEKS', 'FORTNIGHTS',
         'LUNAR MONTHS', 'MONTHS', 'QUARTERS', 'YEARS', 'DECADES', 'USER SET',
     )
     self.head_data = {
         'comment': HeadDataItem('', '', 0, 1, dtype=dt.STRING),
         'multiplier': HeadDataItem(1.000, '{:>10}', 0, 1, dtype=dt.FLOAT,
                                    dps=3),
         'time_units': HeadDataItem('HOURS', '{:>10}', 2, 0,
                                    dtype=dt.CONSTANT, choices=time_units),
         'extending_method': HeadDataItem('EXTEND', '{:>10}', 2, 0,
                                          dtype=dt.CONSTANT,
                                          choices=('EXTEND', 'NOEXTEND',
                                                   'REPEAT')),
         'interpolation': HeadDataItem('LINEAR', '{:>10}', 2, 0,
                                       dtype=dt.CONSTANT,
                                       choices=('LINEAR', 'SPLINE')),
     }

     # Elevation/time pairs; TIME uses checkIncreases as its update callback.
     self.row_data['main'] = RowDataCollection.bulkInitCollection([
         do.FloatData(rdt.ELEVATION, format_str='{:>10}', no_of_dps=3),
         do.FloatData(rdt.TIME, format_str='{:>10}', no_of_dps=3,
                      update_callback=self.checkIncreases),
     ])
     self.row_data['main'].setDummyRow({rdt.TIME: 0, rdt.ELEVATION: 0})
Example #5
0
    def __init__(self, **kwargs):
        """Constructor.

        Args:
            fileOrder (int): The location of this unit in the file.
        """
        AUnit.__init__(self, **kwargs)

        self._unit_type = SpillUnit.UNIT_TYPE
        self._unit_category = SpillUnit.UNIT_CATEGORY
        self._name = 'Spl'
        self._name_ds = 'SplDS'

        self.head_data = {
            'comment': HeadDataItem('', '', 0, 1, dtype=dt.STRING),
            'weir_coef': HeadDataItem(1.700, '{:>10}', 1, 0, dtype=dt.FLOAT,
                                      dps=3),
            'modular_limit': HeadDataItem(0.700, '{:>10}', 1, 2,
                                          dtype=dt.FLOAT, dps=3),
        }

        # Spill geometry rows; CHAINAGE uses checkIncreases as its
        # update callback, easting/northing default to 0.00.
        spill_rows = [
            do.FloatData(rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3,
                         update_callback=self.checkIncreases),
            do.FloatData(rdt.ELEVATION, format_str='{:>10}', no_of_dps=3),
            do.FloatData(rdt.EASTING, format_str='{:>10}', no_of_dps=2,
                         default=0.00),
            do.FloatData(rdt.NORTHING, format_str='{:>10}', no_of_dps=2,
                         default=0.00),
        ]
        self.row_data['main'] = RowDataCollection.bulkInitCollection(spill_rows)
        self.row_data['main'].setDummyRow({rdt.CHAINAGE: 0, rdt.ELEVATION: 0})
Example #6
0
    def setupRowCollection():
        """Create and populate the RowDataCollection for the file data."""
        row_collection = RowDataCollection()

        # Entry 0 gets its own format string because the first value must
        # not be preceded by a comma when the row is written out.
        row_collection.addToCollection(
            do.StringData(0, format_str='{0}', default=''))

        # 0 == string column, 1 == float column (3 decimal places).
        column_types = (0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1)
        for index, col_type in enumerate(column_types, start=1):
            if col_type:
                dobj = do.FloatData(index, format_str=', {0}', no_of_dps=3,
                                    default=0.00)
            else:
                dobj = do.StringData(index, format_str=', {0}', default='')
            row_collection.addToCollection(dobj)

        # Track the row number that each entry came from in the file.
        row_collection.addToCollection(do.IntData('row_no'))

        return row_collection
Example #7
0
    def __init__(self, **kwargs):
        """Constructor.

        Args:
            fileOrder (int): The location of this unit in the file.
            reach_number (int): The reach ID for this unit.
        """
        AUnit.__init__(self, **kwargs)

        self._unit_type = RiverUnit.UNIT_TYPE
        self._unit_category = RiverUnit.UNIT_CATEGORY
        if self._name == 'unknown':
            self._name = 'RivUS'

        self.reach_number = kwargs.get('reach_number', -1)

        self.head_data = {
            'comment': HeadDataItem('', '', 0, 1, dtype=dt.STRING),
            'spill1': HeadDataItem('', '{:<12}', 2, 3, dtype=dt.STRING),
            'spill2': HeadDataItem('', '{:<12}', 2, 4, dtype=dt.STRING),
            'lateral1': HeadDataItem('', '{:<12}', 2, 6, dtype=dt.STRING),
            'lateral2': HeadDataItem('', '{:<12}', 2, 7, dtype=dt.STRING),
            'lateral3': HeadDataItem('', '{:<12}', 2, 8, dtype=dt.STRING),
            'lateral4': HeadDataItem('', '{:<12}', 2, 9, dtype=dt.STRING),
            'distance': HeadDataItem(0.0, '{:>10}', 3, 0, dtype=dt.FLOAT,
                                     dps=3),
            'slope': HeadDataItem(0.0001, '{:>20}', 3, 1, dtype=dt.FLOAT,
                                  dps=4),
            'density': HeadDataItem(1000, '{:>10}', 3, 2, dtype=dt.INT),
        }

        # Row data types for the geometry rows. Each must have a type, an
        # output format and a position in the row; all other arguments are
        # accepted as **kwargs.
        dobjs = [
            # update_callback is called every time a value is added or updated
            do.FloatData(rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3,
                         update_callback=self.checkIncreases),
            do.FloatData(rdt.ELEVATION, format_str='{:>10}', no_of_dps=3),
            do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=0.039,
                         no_of_dps=3),
            do.SymbolData(rdt.PANEL_MARKER, '*', format_str='{:<5}',
                          default=False),
            do.FloatData(rdt.RPL, format_str='{:>5}', default=1.000,
                         no_of_dps=3),
            do.ConstantData(rdt.BANKMARKER, ('', 'LEFT', 'RIGHT', 'BED'),
                            format_str='{:<10}', default=''),
            do.FloatData(rdt.EASTING, format_str='{:>10}', default=0.0,
                         no_of_dps=2),
            do.FloatData(rdt.NORTHING, format_str='{:>10}', default=0.0,
                         no_of_dps=2),
            do.ConstantData(rdt.DEACTIVATION, ('', 'LEFT', 'RIGHT'),
                            format_str='{:<10}', default=''),
            # Default == '~' means to ignore formatting and apply '' when
            # value is None
            do.StringData(rdt.SPECIAL, format_str='{:<10}', default='~'),
        ]
        self.row_data['main'] = RowDataCollection.bulkInitCollection(dobjs)
        self.row_data['main'].setDummyRow(
            {rdt.CHAINAGE: 0, rdt.ELEVATION: 0, rdt.ROUGHNESS: 0})
Example #8
0
 def setupRowData(self):
     """Build the geometry ('main') and opening row data collections.

     Every BridgeUnit relies on these two collections; building them here,
     in a method invoked by the constructor, lets subclasses override the
     layout when required.
     """
     # Cross-section geometry columns.
     self.row_data['main'] = RowDataCollection.bulkInitCollection([
         do.FloatData(rdt.CHAINAGE,
                      format_str='{:>10}',
                      no_of_dps=3,
                      update_callback=self.checkIncreases),
         do.FloatData(rdt.ELEVATION,
                      format_str='{:>10}',
                      no_of_dps=3),
         do.FloatData(rdt.ROUGHNESS,
                      format_str='{:>10}',
                      no_of_dps=3,
                      default=0.039),
         do.ConstantData(rdt.EMBANKMENT, ('', 'L', 'R'),
                         format_str='{:>11}',
                         default=''),
     ])
     self.row_data['main'].setDummyRow({
         rdt.CHAINAGE: 0,
         rdt.ELEVATION: 0,
         rdt.ROUGHNESS: 0,
     })

     # Bridge opening columns.
     self.row_data['opening'] = RowDataCollection.bulkInitCollection([
         do.FloatData(rdt.OPEN_START,
                      format_str='{:>10}',
                      no_of_dps=3,
                      update_callback=self.checkOpening),
         do.FloatData(rdt.OPEN_END,
                      format_str='{:>10}',
                      no_of_dps=3,
                      update_callback=self.checkOpening),
         do.FloatData(rdt.SPRINGING_LEVEL,
                      format_str='{:>10}',
                      no_of_dps=3,
                      default=0.0),
         do.FloatData(rdt.SOFFIT_LEVEL,
                      format_str='{:>10}',
                      no_of_dps=3,
                      default=0.0),
     ])
     self.row_data['opening'].setDummyRow({rdt.OPEN_START: 0, rdt.OPEN_END: 0})
Example #9
0
    def __init__(self, **kwargs):
        """Constructor.

        Args:
            fileOrder (int): The location of this unit in the file.
        """
        super(ReservoirUnit, self).__init__(**kwargs)

        self._name = 'Res'
        self.head_data = {
            'revision': HeadDataItem(0, '{:<1}', 0, 0, dtype=dt.INT,
                                     allow_blank=True),
            'comment': HeadDataItem('', '', 0, 1, dtype=dt.STRING),
            'easting': HeadDataItem(0.000, '{:>10}', 1, 0, dtype=dt.FLOAT,
                                    dps=3),
            'northing': HeadDataItem(0.000, '{:>10}', 1, 1, dtype=dt.FLOAT,
                                     dps=3),
            'runoff_factor': HeadDataItem(0.000, '{:>10}', 1, 2,
                                          dtype=dt.FLOAT, dps=3),
            'lateral1': HeadDataItem('', '{:<12}', 2, 0, dtype=dt.STRING),
            'lateral2': HeadDataItem('', '{:<12}', 2, 1, dtype=dt.STRING),
            'lateral3': HeadDataItem('', '{:<12}', 2, 2, dtype=dt.STRING),
            'lateral4': HeadDataItem('', '{:<12}', 2, 3, dtype=dt.STRING),
            # Plain list rather than a HeadDataItem.
            'names': [],
        }

        self._unit_type = ReservoirUnit.UNIT_TYPE
        self._unit_category = ReservoirUnit.UNIT_CATEGORY

        # Elevation/area pairs; both use checkIncreases as the update
        # callback.
        dobjs = [
            do.FloatData(rdt.ELEVATION, format_str='{:>10}', no_of_dps=3,
                         use_sn=1000000, update_callback=self.checkIncreases),
            do.FloatData(rdt.AREA, format_str='{:>10}', no_of_dps=3,
                         use_sn=1000000, update_callback=self.checkIncreases),
        ]
        self.row_data['main'] = RowDataCollection.bulkInitCollection(dobjs)
        self.row_data['main'].setDummyRow({rdt.ELEVATION: 0, rdt.AREA: 0})
Example #10
0
    def __init__(self, **kwargs):
        """Constructor.

        See Also:
            BridgeUnit
        """
        BridgeUnit.__init__(self, **kwargs)

        self._unit_type = BridgeUnitUsbpr.UNIT_TYPE
        self._unit_category = BridgeUnit.UNIT_CATEGORY

        # Header values: the data at the top of the unit section.
        self.head_data = {
            'comment': HeadDataItem('', '', 0, 1, dtype=dt.STRING),
            'remote_us': HeadDataItem('', '{:<12}', 2, 2, dtype=dt.STRING),
            'remote_ds': HeadDataItem('', '{:<12}', 2, 3, dtype=dt.STRING),
            'roughness_type': HeadDataItem('MANNING', '{:<7}', 3, 0,
                                           dtype=dt.CONSTANT,
                                           choices=('MANNING',)),
            'calibration_coef': HeadDataItem(1.000, '{:>10}', 4, 0,
                                             dtype=dt.FLOAT, dps=3),
            'skew_angle': HeadDataItem(0.000, '{:>10}', 4, 1,
                                       dtype=dt.FLOAT, dps=3),
            'width': HeadDataItem(0.000, '{:>10}', 4, 2, dtype=dt.FLOAT,
                                  dps=3),
            'dual_distance': HeadDataItem(0.000, '{:>10}', 4, 3,
                                          dtype=dt.FLOAT, dps=3),
            'num_of_orifices': HeadDataItem(0, '{:>10}', 4, 4, dtype=dt.INT),
            'orifice_flag': HeadDataItem('', '{:>10}', 4, 5,
                                         dtype=dt.CONSTANT,
                                         choices=('', 'ORIFICE')),
            'op_lower': HeadDataItem(0.000, '{:>10}', 4, 6, dtype=dt.FLOAT,
                                     dps=3),
            'op_upper': HeadDataItem(0.000, '{:>10}', 4, 7, dtype=dt.FLOAT,
                                     dps=3),
            'op_cd': HeadDataItem(0.000, '{:>10}', 4, 8, dtype=dt.FLOAT,
                                  dps=3),
            'abutment_type': HeadDataItem('3', '{:>10}', 5, 0,
                                          dtype=dt.CONSTANT,
                                          choices=('1', '2', '3')),
            'num_of_piers': HeadDataItem(0, '{:>10}', 6, 0, dtype=dt.INT),
            'pier_shape': HeadDataItem('FLAT', '{:<10}', 6, 1,
                                       dtype=dt.CONSTANT,
                                       choices=('FLAT', 'ARCH')),
            'pier_shape_2': HeadDataItem('', '{:<10}', 6, 2,
                                         dtype=dt.CONSTANT,
                                         choices=('FLAT', 'ARCH'),
                                         allow_blank=True),
            'pier_calibration_coef': HeadDataItem('', '{:>10}', 6, 3,
                                                  dtype=dt.FLOAT, dps=3,
                                                  allow_blank=True),
            'abutment_align': HeadDataItem('ALIGNED', '{:>10}', 7, 0,
                                           dtype=dt.CONSTANT,
                                           choices=('ALIGNED', 'SKEW')),
        }

        # Culvert row data collection added to the self.row_data dict.
        culvert_rows = [
            do.FloatData(rdt.INVERT, format_str='{:>10}', no_of_dps=3),
            do.FloatData(rdt.SOFFIT, format_str='{:>10}', no_of_dps=3),
            do.FloatData(rdt.AREA, format_str='{:>10}', no_of_dps=3,
                         default=0.0),
            do.FloatData(rdt.CD_PART, format_str='{:>10}', no_of_dps=3,
                         default=1.0),
            do.FloatData(rdt.CD_FULL, format_str='{:>10}', no_of_dps=3,
                         default=1.0),
            do.FloatData(rdt.DROWNING, format_str='{:>10}', no_of_dps=3,
                         default=1.0),
        ]
        self.row_data['culvert'] = RowDataCollection.bulkInitCollection(
            culvert_rows)
        self.row_data['culvert'].setDummyRow({rdt.INVERT: 0, rdt.SOFFIT: 0})
Example #11
0
    def __init__(self, **kwargs):
        """Constructor.

        Args:
            fileOrder (int): The location of this unit in the file.
        """
        AUnit.__init__(self, **kwargs)

        self._name = 'Spl'
        self._name_ds = 'SplDS'
        self.head_data = {
            'comment': HeadDataItem('', '', 0, 1, dtype=dt.STRING),
            'weir_coef': HeadDataItem(1.700, '{:>10}', 1, 0, dtype=dt.FLOAT, dps=3),
            'modular_limit': HeadDataItem(0.700, '{:>10}', 1, 2, dtype=dt.FLOAT, dps=3),
        }

        self._unit_type = SpillUnit.UNIT_TYPE
        self._unit_category = SpillUnit.UNIT_CATEGORY

        # Chainage/elevation pairs plus easting/northing (default 0.00);
        # CHAINAGE uses checkIncreases as its update callback.
        dobjs = [
            do.FloatData(rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3,
                         update_callback=self.checkIncreases),
            do.FloatData(rdt.ELEVATION, format_str='{:>10}', no_of_dps=3),
            do.FloatData(rdt.EASTING, format_str='{:>10}', no_of_dps=2, default=0.00),
            do.FloatData(rdt.NORTHING, format_str='{:>10}', no_of_dps=2, default=0.00),
        ]
        self.row_data['main'] = RowDataCollection.bulkInitCollection(dobjs)
        self.row_data['main'].setDummyRow({rdt.CHAINAGE: 0, rdt.ELEVATION: 0})
Example #12
0
    def __init__(self, **kwargs):
        """Constructor.

        Sets up the head data (the values at the top of the unit section)
        and the 'main' row data collection holding the RAIN series.

        Args:
            **kwargs: keyword arguments forwarded to the superclass
                constructor.
        """
        super(RefhUnit, self).__init__(**kwargs)

        self._unit_type = RefhUnit.UNIT_TYPE
        self._unit_category = RefhUnit.UNIT_CATEGORY
        if self._name == 'unknown':
            self._name = 'Refh_unit'

        # Fill in the header values these contain the data at the top of the
        # section, such as the unit name and labels.
        self.head_data = {
            'revision':
            HeadDataItem(1, '{:<1}', 0, 0, dtype=dt.INT),
            'comment':
            HeadDataItem('', '', 0, 1, dtype=dt.STRING),
            'z':
            HeadDataItem(0.000, '{:>10}', 2, 0, dtype=dt.FLOAT, dps=3),
            'easting':
            HeadDataItem('', '{:>10}', 2, 1, dtype=dt.STRING),
            'northing':
            HeadDataItem('', '{:>10}', 2, 2, dtype=dt.STRING),
            'time_delay':
            HeadDataItem(0.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3),
            'time_step':
            HeadDataItem(1.0, '{:>10}', 3, 1, dtype=dt.FLOAT, dps=3),
            'bf_only':
            HeadDataItem('', '{:>10}', 3, 2, dtype=dt.STRING),
            'sc_flag':
            HeadDataItem('SCALEFACT',
                         '{:>10}',
                         3,
                         3,
                         dtype=dt.CONSTANT,
                         choices=('SCALEFACT', 'PEAKVALUE')),
            'scale_factor':
            HeadDataItem(1.000, '{:>10}', 3, 4, dtype=dt.FLOAT, dps=3),
            'hydrograph_mode':
            HeadDataItem('HYDROGRAPH',
                         '{:>10}',
                         3,
                         5,
                         dtype=dt.CONSTANT,
                         choices=('HYDROGRAPH', 'HYETOGRAPH')),
            'hydrograph_scaling':
            HeadDataItem('RUNOFF',
                         '{:>10}',
                         3,
                         6,
                         dtype=dt.CONSTANT,
                         choices=('RUNOFF', 'FULL')),
            'min_flow':
            HeadDataItem(1.000, '{:>10}', 3, 7, dtype=dt.FLOAT, dps=3),
            'catchment_area':
            HeadDataItem(0.00, '{:>10}', 4, 0, dtype=dt.FLOAT, dps=3),
            'saar':
            HeadDataItem(0, '{:>10}', 4, 1, dtype=dt.INT),
            'urbext':
            HeadDataItem(0.000, '{:>10}', 4, 2, dtype=dt.FLOAT, dps=5),
            'season':
            HeadDataItem('DEFAULT',
                         '{:>10}',
                         4,
                         3,
                         dtype=dt.CONSTANT,
                         choices=('DEFAULT', 'WINTER', 'SUMMER')),
            'published_report':
            HeadDataItem('DLL',
                         '{:>10}',
                         4,
                         4,
                         dtype=dt.CONSTANT,
                         choices=('DLL', 'REPORT')),

            # Urban - only used if 'urban' == 'URBANREFH'
            # Note: urban involves updating the revision number and published_report as well.
            #       if you want to set it to urban you should use useUrban(True) rather than
            #       set this directly (or useUrban(False) to deactivate it).
            'urban':
            HeadDataItem('',
                         '{:>10}',
                         4,
                         5,
                         dtype=dt.CONSTANT,
                         choices=('', 'URBANREFH')),
            'subarea_1':
            HeadDataItem(0.00, '{:>10}', 5, 0, dtype=dt.FLOAT, dps=2),
            'dplbar_1':
            HeadDataItem(0.000, '{:>10}', 5, 1, dtype=dt.FLOAT, dps=3),
            'suburbext_1':
            HeadDataItem(0.000, '{:>10}', 5, 2, dtype=dt.FLOAT, dps=3),
            'calibration_1':
            HeadDataItem(0.000, '{:>10}', 5, 3, dtype=dt.FLOAT, dps=3),
            'subarea_2':
            HeadDataItem(0.00, '{:>10}', 6, 0, dtype=dt.FLOAT, dps=2),
            'dplbar_2':
            HeadDataItem(0.000, '{:>10}', 6, 1, dtype=dt.FLOAT, dps=3),
            'suburbext_2':
            HeadDataItem(0.000, '{:>10}', 6, 2, dtype=dt.FLOAT, dps=3),
            'calibration_2':
            HeadDataItem(0.000, '{:>10}', 6, 3, dtype=dt.FLOAT, dps=3),
            'subrunoff_2':
            HeadDataItem(0.000, '{:>10}', 6, 4, dtype=dt.FLOAT, dps=3),
            'sewer_rp_2':
            HeadDataItem('RETURN',
                         '{:>10}',
                         6,
                         5,
                         dtype=dt.CONSTANT,
                         choices=('RETURN', 'DEPTH')),
            'sewer_depth_2':
            HeadDataItem(0.000, '{:>10}', 6, 6, dtype=dt.FLOAT, dps=3),
            'sewer_lossvolume_2':
            HeadDataItem('VOLUME',
                         '{:>10}',
                         6,
                         7,
                         dtype=dt.CONSTANT,
                         choices=('VOLUME', 'FLOW')),
            'subarea_3':
            HeadDataItem(0.00, '{:>10}', 7, 0, dtype=dt.FLOAT, dps=2),
            'dplbar_3':
            HeadDataItem(0.000, '{:>10}', 7, 1, dtype=dt.FLOAT, dps=3),
            'suburbext_3':
            HeadDataItem(0.000, '{:>10}', 7, 2, dtype=dt.FLOAT, dps=3),
            'calibration_3':
            HeadDataItem(0.000, '{:>10}', 7, 3, dtype=dt.FLOAT, dps=3),
            'subrunoff_3':
            HeadDataItem(0.000, '{:>10}', 7, 4, dtype=dt.FLOAT, dps=3),
            'storm_area':
            HeadDataItem(0.00, '{:>10}', 8, 0, dtype=dt.FLOAT, dps=2),
            'storm_duration':
            HeadDataItem(0.000, '{:>10}', 8, 1, dtype=dt.FLOAT, dps=3),
            'sn_rate':
            HeadDataItem(0.000, '{:>10}', 8, 2, dtype=dt.FLOAT, dps=3),
            'rainfall_flag':
            HeadDataItem('DESIGN',
                         '{:>10}',
                         9,
                         0,
                         dtype=dt.CONSTANT,
                         choices=('DESIGN', 'USER')),
            'arf_flag':
            HeadDataItem('DESIGN',
                         '{:>10}',
                         9,
                         1,
                         dtype=dt.CONSTANT,
                         choices=('DESIGN', 'USER')),
            'rainfall_comment':
            HeadDataItem('', '', 9, 2, dtype=dt.STRING),
            'rainfall_odepth':
            HeadDataItem(0.000, '{:>10}', 10, 0, dtype=dt.FLOAT, dps=3),
            'return_period':
            HeadDataItem(0, '{:>10}', 10, 1, dtype=dt.INT),
            'arf':
            HeadDataItem(0.000, '{:>10}', 10, 2, dtype=dt.FLOAT, dps=5),
            'c':
            HeadDataItem(0.000, '{:>10}', 10, 3, dtype=dt.FLOAT, dps=5),
            'd1':
            HeadDataItem(0.000, '{:>10}', 10, 4, dtype=dt.FLOAT, dps=5),
            'd2':
            HeadDataItem(0.000, '{:>10}', 10, 5, dtype=dt.FLOAT, dps=5),
            # NOTE(review): duplicate dict key 'd2' below — it silently
            # overwrites the entry above at column (10, 5), so that item is
            # lost. One of the two keys is presumably a typo; confirm the
            # intended names against the ReFH/FEH DDF parameter list
            # (c, d1, d2, d3, e, f) before renaming, as head_data keys may
            # be referenced elsewhere.
            'd2':
            HeadDataItem(0.000, '{:>10}', 10, 6, dtype=dt.FLOAT, dps=5),
            'd3':
            HeadDataItem(0.000, '{:>10}', 10, 7, dtype=dt.FLOAT, dps=5),
            'e':
            HeadDataItem(0.000, '{:>10}', 10, 8, dtype=dt.FLOAT, dps=5),
            'f':
            HeadDataItem(0.000, '{:>10}', 10, 9, dtype=dt.FLOAT, dps=5),
            'rp_flag':
            HeadDataItem('DESIGN',
                         '{:>10}',
                         11,
                         0,
                         dtype=dt.CONSTANT,
                         choices=('DESIGN', 'USER')),
            'scf_flag':
            HeadDataItem('DESIGN',
                         '{:>10}',
                         11,
                         1,
                         dtype=dt.CONSTANT,
                         choices=('DESIGN', 'USER')),
            'scf':
            HeadDataItem(0.000, '{:>10}', 11, 2, dtype=dt.FLOAT, dps=5),
            'use_refined_rainfall':
            HeadDataItem('0',
                         '{:>10}',
                         11,
                         3,
                         dtype=dt.CONSTANT,
                         choices=('', '0', '1')),
            'cmax_flag':
            HeadDataItem('DESIGN',
                         '{:>10}',
                         12,
                         0,
                         dtype=dt.CONSTANT,
                         choices=('DESIGN', 'USER')),
            'cini_flag':
            HeadDataItem('DESIGN',
                         '{:>10}',
                         12,
                         1,
                         dtype=dt.CONSTANT,
                         choices=('DESIGN', 'USER')),
            'alpha_flag':
            HeadDataItem('DESIGN',
                         '{:>10}',
                         12,
                         2,
                         dtype=dt.CONSTANT,
                         choices=('DESIGN', 'USER')),
            'models_comment':
            HeadDataItem('', '{}', 12, 3, dtype=dt.STRING),
            'cm_dcf':
            HeadDataItem(0.000, '{:>10}', 13, 0, dtype=dt.FLOAT, dps=3),
            'cmax':
            HeadDataItem(0.000, '{:>10}', 13, 1, dtype=dt.FLOAT, dps=3),
            'cini':
            HeadDataItem(0.000, '{:>10}', 13, 2, dtype=dt.FLOAT, dps=3),
            'alpha':
            HeadDataItem(0.000, '{:>10}', 13, 3, dtype=dt.FLOAT, dps=3),
            'bfihost':
            HeadDataItem(0.000, '{:>10}', 13, 4, dtype=dt.FLOAT, dps=3),
            'uh_flag':
            HeadDataItem('DESIGN',
                         '{:>10}',
                         14,
                         0,
                         dtype=dt.CONSTANT,
                         choices=('DESIGN', 'USER')),
            'tp_flag':
            HeadDataItem('DESIGN',
                         '{:>10}',
                         14,
                         1,
                         dtype=dt.CONSTANT,
                         choices=('DESIGN', 'USER')),
            'up_flag':
            HeadDataItem('DESIGN',
                         '{:>10}',
                         14,
                         3,
                         dtype=dt.CONSTANT,
                         choices=('DESIGN', 'USER')),
            'uk_flag':
            HeadDataItem('DESIGN',
                         '{:>10}',
                         14,
                         4,
                         dtype=dt.CONSTANT,
                         choices=('DESIGN', 'USER')),
            'tp_dcf':
            HeadDataItem(0.000, '{:>10}', 15, 0, dtype=dt.FLOAT, dps=3),
            'tp0':
            HeadDataItem(0.000, '{:>10}', 15, 1, dtype=dt.FLOAT, dps=3),
            'tpt':
            HeadDataItem(0.000, '{:>10}', 15, 2, dtype=dt.FLOAT, dps=3),
            'dplbar':
            HeadDataItem(0.000, '{:>10}', 15, 3, dtype=dt.FLOAT, dps=3),
            'dpsbar':
            HeadDataItem(0.000, '{:>10}', 15, 4, dtype=dt.FLOAT, dps=3),
            'propwet':
            HeadDataItem(0.000, '{:>10}', 15, 5, dtype=dt.FLOAT, dps=3),
            'up':
            HeadDataItem(0.000, '{:>10}', 15, 6, dtype=dt.FLOAT, dps=3),
            'uk':
            HeadDataItem(0.000, '{:>10}', 15, 7, dtype=dt.FLOAT, dps=3),
            # NOTE(review): float default on a dt.INT item — confirm whether
            # the default should be 0.
            'uh_rows':
            HeadDataItem(0.000, '{:>10}', 16, 0, dtype=dt.INT),
            #             'uh_units': HeadDataItem(0.000, '{:>10}', 14, 9, dtype=dt.INT),        # TODO: Find out what the deal with these is
            #             'uh_fct': HeadDataItem(0.000, '{:>10}', 14, 10, dtype=dt.FLOAT, dps=3),
            'bl_flag':
            HeadDataItem('DESIGN',
                         '{:>10}',
                         17,
                         0,
                         dtype=dt.CONSTANT,
                         choices=('DESIGN', 'USER')),
            'br_flag':
            HeadDataItem('DESIGN',
                         '{:>10}',
                         17,
                         1,
                         dtype=dt.CONSTANT,
                         choices=('DESIGN', 'USER')),
            'bf0_flag':
            HeadDataItem('DESIGN',
                         '{:>10}',
                         17,
                         2,
                         dtype=dt.CONSTANT,
                         choices=('DESIGN', 'USER')),
            'bl_dcf':
            HeadDataItem(0.000, '{:>10}', 18, 0, dtype=dt.FLOAT, dps=3),
            'bl':
            HeadDataItem(0.000, '{:>10}', 18, 1, dtype=dt.FLOAT, dps=3),
            'br_dcf':
            HeadDataItem(0.000, '{:>10}', 18, 2, dtype=dt.FLOAT, dps=3),
            'br':
            HeadDataItem(0.000, '{:>10}', 18, 3, dtype=dt.FLOAT, dps=3),
            'bf0':
            HeadDataItem(0.000, '{:>10}', 18, 4, dtype=dt.FLOAT, dps=3),
        }

        dobjs = [
            # update_callback is called every time a value is added or updated
            do.FloatData(rdt.RAIN, format_str='{:>10}', default=0, no_of_dps=3)
        ]
        # (removed an unused local 'dummy_row' that shadowed the literal
        # passed to setDummyRow below)
        self.row_data['main'] = RowDataCollection.bulkInitCollection(dobjs)
        self.row_data['main'].setDummyRow({rdt.RAIN: 0})
Example #13
0
def readTmfFile(datafile):
    """Loads the contents of the Materials TMF file referenced by datafile.

    Loads the data from the file referenced by the given TuflowFile object into
    a :class:'rowdatacollection' and a list of comment only lines.

    Args:
        datafile(TuflowFile): TuflowFile object with file details.

    Return:
        tuple: rowdatacollection, comment_lines(list).

    See Also:
        :class:'rowdatacollection'.
    """
    value_separator = ','
    comment_types = ['#', '!']
    tmf_enum = dataobj.TmfEnum()

    path = datafile.absolutePath()
    # Column indices 0-10 hold the values loaded for each materials row.
    value_order = range(11)

    # First column is an integer id; the remaining ten are float values.
    # Only the first column omits the leading comma in its format string.
    row_collection = RowDataCollection()
    row_collection.addToCollection(do.IntData(0, format_str=None, default=''))
    for i in range(1, 11):
        row_collection.addToCollection(
            do.FloatData(i, format_str=', {0}', default='', no_of_dps=3))

    # Keep track of any comment lines and the row numbers as well
    row_collection.addToCollection(
        do.StringData('comment', format_str=' ! {0}', default=''))
    row_collection.addToCollection(
        do.IntData('row_no', format_str=None, default=''))

    logger.info('Loading data file contents from disc - %s' % (path))
    contents = _loadFileFromDisc(path)

    # Stores the comments found in the file
    comment_lines = []

    # Loop through the contents list loaded from file line-by-line.
    row_count = 0
    for line in contents:

        comment = hasCommentOnlyLine(line, comment_types)
        if comment or comment == '':
            comment_lines.append(comment)

        # If we have a line that isn't a comment or a blank then it is going
        # to contain materials entries.
        else:
            comment_lines.append(None)
            row_collection = _loadRowData(line, row_count, row_collection,
                                          tmf_enum.ITERABLE, comment_types,
                                          value_separator)
            row_count += 1

    # Just need to reset the has_changed variable because it will have been
    # set to True while loading everything in.
    for col in value_order:
        row_collection.getDataObject(col).has_changed = False

    return row_collection, comment_lines
Example #14
0
def readMatSubfile(main_datafile, filename, header_list, args_dict):
    """Load a Materials subfile (csv) referenced from a main Materials file.

    Scans the subfile first to work out which columns contain numeric data,
    then loads the rows into a rowdatacollection, tracking comment-only
    lines and the column locations of up to two requested headers.

    Args:
        main_datafile: data file object whose .root locates the subfile.
        filename(str): subfile name, joined onto main_datafile.root.
        header_list(list): up to two column header names to locate in the
            subfile's header row.
        args_dict(dict): extra arguments (not read by this function).

    Return:
        DataFileSubfileMat: holding the loaded row data, the comment lines
        and the indices of the two requested headers (-1 if not found).

    Raises:
        IOError: if the file at the resolved path cannot be opened.
    """
    value_separator = ','
    comment_types = ['#', '!']
    mat_subfile_enum = dataobj.SubfileMatEnum()
    path = os.path.join(main_datafile.root, filename)
    root = main_datafile.root

    # Fall back to the string 'None' (not the None object) when header names
    # aren't supplied; entries below are compared against these strings.
    header1 = 'None'
    header2 = 'None'
    if len(header_list) > 0:
        header1 = header_list[0]
        if len(header_list) > 1:
            header2 = header_list[1]

    def _scanfile(filepath):
        """Scans the file before we do any loading to identify the contents.

        Need to do this because the file can be setup in so many way that it
        becomes a headache to work it out in advance. Better to take a little
        bit of extra processing time and do some quick checks first.

        Arguments:
            file_path (str): the path to the subfile.

        Return:
            tuple:
                 list: booleans with whether the column contains
                       data that we want or not.
                 int:  length of the cols list.
                 list: containing all of the first row column data
                 int:  first row with usable data on.
        """
        logger.debug('Scanning Materials file - %s' % (filepath))

        # NOTE(review): 'rb' + csv.reader is Python 2 style; Python 3 needs
        # text mode for csv - confirm the target interpreter version.
        with open(filepath, 'rb') as csv_file:

            csv_file = csv.reader(csv_file)

            cols = []
            head_list = []
            start_row = -1
            for i, row in enumerate(csv_file, 0):
                # Stop scanning at the first fully blank row.
                if "".join(row).strip() == "":
                    break

                for j, col in enumerate(row, 0):
                    if i == 0:
                        # First row: assume headers; mark all columns unknown.
                        cols.append(False)
                        head_list = row
                    elif uuf.isNumeric(col):
                        # Numeric entry marks this column as wanted data.
                        cols[j] = True
                        if start_row == -1:
                            start_row = i
                    elif cols[j] == True:
                        break

        return cols, len(cols), head_list, start_row

    def _loadHeadData(row, row_collection, col_length):
        """Loads the header row and records where the requested headers sit.

        Args:
            row(list): the header row values.
            row_collection(rowdatacollection): for updating.
            col_length(int): number of columns found by _scanfile.

        Return:
            tuple: row_collection, header1 column index, header2 column
                index (-1 for a header that was not found).
        """
        new_row = [None] * 12

        comment_indices, length = uuf.findSubstringInList('!', row)
        comment_lines.append(None)

        head1_location = -1
        head2_location = -1
        row_length = len(row)
        for i in range(0, col_length):
            if i < row_length:
                entry = row[i].strip()
                if entry == header1:
                    head1_location = i
                if entry == header2:
                    head2_location = i
                row_collection._addValue('actual_header', entry)

        return row_collection, head1_location, head2_location

    def _loadRowData(row, row_count, row_collection, comment_lines, col_length,
                     start_row):
        """Loads the data in a specific row of the file.

        Args:
            row(list): containing the row data.
            row_count(int): the current row number.
            required_headers(list): column names that must exist.

        Return:
            rowdatacollection: updated with header row details.
        """
        # Any lines that aren't headers, but are above the first row to contain
        # actual data will be stored as comment lines
        if row_count < start_row:
            comment_lines.append(row)
            return row_collection, comment_lines
        else:
            comment_lines.append(None)

        # Trailing comment in the last cell is kept separately.
        if '!' in row[-1] or '#' in row[-1]:
            row_collection._addValue('comment', row[-1])

        # Add the row data in the order that it appears in the file
        # from left to right.
        for i in range(col_length):
            if i < len(row):
                row_collection._addValue(i, row[i])

        return row_collection, comment_lines

    try:
        logger.info('Loading data file contents from disc - %s' % (path))
        with open(path, 'rb') as csv_file:
            csv_file = csv.reader(csv_file)

            # Do a quick check of the file setup
            cols, col_length, head_list, start_row = _scanfile(path)

            # First entry doesn't want to have a comma in front when formatting.
            # but all of the others do.
            row_collection = RowDataCollection()
            row_collection.addToCollection(
                do.FloatData(0, format_str=' {0}', default='', no_of_dps=6))
            for i in range(1, len(cols)):
                # Numeric columns (per _scanfile) load as floats, others as
                # plain strings.
                if cols[i] == True:
                    row_collection.addToCollection(
                        do.FloatData(i,
                                     format_str=', {0}',
                                     default='',
                                     no_of_dps=6))
                else:
                    row_collection.addToCollection(
                        do.StringData(i, format_str=', {0}', default=''))

            row_collection.addToCollection(do.StringData('actual_header',
                                                         format_str='{0}',
                                                         default=''),
                                           index=0)
            row_collection.addToCollection(
                do.IntData('row_no', format_str=None, default=''))

            # Stores the comments found in the file
            comment_lines = []
            first_data_line = False
            # Loop through the contents list loaded from file line-by-line.
            for i, line in enumerate(csv_file, 0):

                comment = hasCommentOnlyLine(''.join(line), comment_types)
                if comment or comment == '':
                    comment_lines.append([comment, i])

                # If we have a line that isn't a comment or a blank then it is going
                # to contain materials entries.
                else:
                    # First non-comment is the headers
                    if first_data_line == False:
                        first_data_line = True
                        row_collection, head1_loc, head2_loc = _loadHeadData(
                            line, row_collection, col_length)
                    else:
                        row_collection, comment_lines = _loadRowData(
                            line, i, row_collection, comment_lines, col_length,
                            start_row)

                    row_collection._addValue('row_no', i)

    except IOError:
        logger.warning('Cannot load file - IOError')
        raise IOError('Cannot load file at: ' + path)

    path_holder = filetools.PathHolder(path, root)
    mat_sub = dataobj.DataFileSubfileMat(path_holder, row_collection,
                                         comment_lines, path_holder.filename,
                                         head1_loc, head2_loc)
    return mat_sub
Example #15
0
def readMatCsvFile(datafile, args_dict=None):
    """Loads the contents of the Materials CSV file referenced by datafile.

    Loads the data from the file referenced by the given TuflowFile object into
    a :class:'rowdatacollection' and a list of comment only lines.

    Args:
        datafile(TuflowFile): TuflowFile object with file details.
        args_dict(dict): optional extra arguments (not read by this function).
            Defaults to None rather than {} to avoid the shared mutable
            default-argument pitfall.

    Return:
        tuple: rowdatacollection, comment_lines(list), subfile_details(dict).

    Raises:
        IOError: if the file cannot be opened.
        IndexError, AttributeError: if the file is not formatted as a
            Materials.csv file.

    See Also:
        :class:'rowdatacollection'.
    """
    if args_dict is None:
        args_dict = {}
    comment_types = ['#', '!']
    csv_enum = dataobj.MatCsvEnum()
    # Maps subfile name -> list of header names referenced in the entry.
    subfile_details = {}

    def _loadHeadData(row, row_collection):
        """Adds the file-defined column headers to the row_collection.

        The materials csv header row has 4 columns that map onto the 12
        internal columns, so only indices 0, 1, 9 and 11 are populated; the
        remaining header slots are stored as None placeholders.
        """
        new_row = [None] * 12

        if '!' in row[-1] or '#' in row[-1]:
            row_collection._addValue('comment', row[-1])

        new_row[0] = row[0]
        new_row[1] = row[1]
        new_row[9] = row[2]
        new_row[11] = row[3]

        # Store every slot (including placeholders) so the stored header
        # order lines up with the 12 internal columns.
        for entry in new_row:
            row_collection._addValue('actual_header', entry)

        return row_collection

    def _disectEntry(col_no, entry, new_row):
        """Breaks the row values into the appropriate object values.

        The materials file can have Excel style sub-values. i.e. it can have
        seperate columns defined within a bigger one. This function will break
        those values down into a format usable by the values initiated in the
        rowdatacollection.

        Args:
            col_no(int): the current column number.
            entry(string): the value of the current column.
            new_row(list): the row values to update.

        Return:
            list containing the updated row values.

        Note:
            This isn't very nice. Need to clean it up and find a better, safer
            way of dealing with breaking the row data up. It may be excess work
            but perhaps creating an xml converter could work quite will and
            make dealing with the file a bit easier?
        """
        # Put in ID and Hazard as normal
        if col_no == 0:
            new_row[0] = entry
        elif col_no == 11:
            new_row[11] = entry
        # Possible break up Manning's entry further
        elif col_no == 1:
            # See if there's more than one value in the Manning's category.
            splitval = entry.split(',')

            # If there is and it's numeric then it's a single value for 'n'
            if len(splitval) == 1:
                if uuf.isNumeric(splitval[0]):
                    new_row[1] = splitval[0]

                # Otherwise it's a filename. These can be further separated
                # into two column headers to read from the sub files.
                else:
                    strsplit = splitval[0].split('|')
                    if len(strsplit) == 1:
                        subfile_details[strsplit[0].strip()] = []
                        new_row[6] = strsplit[0].strip()
                    elif len(strsplit) == 2:
                        subfile_details[strsplit[0]] = [strsplit[1].strip()]
                        new_row[6] = strsplit[0].strip()
                        new_row[7] = strsplit[1].strip()
                    else:
                        subfile_details[strsplit[0]] = [
                            strsplit[1].strip(), strsplit[2].strip()
                        ]
                        new_row[6] = strsplit[0].strip()
                        new_row[7] = strsplit[1].strip()
                        new_row[8] = strsplit[2].strip()

            # If there's more than one value then it must be the Manning's
            # depth curve values (N1, Y1, N2, Y2).
            else:
                new_row[2] = splitval[0]
                new_row[3] = splitval[1]
                new_row[4] = splitval[2]
                new_row[5] = splitval[3]

        # Finally grab the infiltration parameters (IL, CL)
        elif col_no == 2:
            splitval = entry.split(',')
            new_row[9] = splitval[0]
            new_row[10] = splitval[1]

        return new_row

    def _loadRowData(row, row_count, row_collection):
        """Loads the data in a specific row of the file.

        Args:
            row(list): containing the row data.
            row_count(int): the current row number.
            row_collection(rowdatacollection): updated in place.
        """
        if '!' in row[-1] or '#' in row[-1]:
            row_collection._addValue('comment', row[-1])
        new_row = [None] * 12

        # Add the row data in the order that it appears in the file
        # from left to right.
        for i in csv_enum.ITERABLE:
            if i < len(row):
                new_row = _disectEntry(i, row[i], new_row)

        for val, item in enumerate(new_row):
            row_collection._addValue(val, item)

    # First entry doesn't want to have a comma in front when formatting.
    row_collection = RowDataCollection()
    # 1 = float column, 0 = string column.
    types = [1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0]

    # Do the first entry separately because it has a different format string
    row_collection.addToCollection(
        do.StringData(0, format_str='{0}', default=''))
    for i, t in enumerate(types, 1):
        if t == 0:
            row_collection.addToCollection(
                do.StringData(i, format_str=', {0}', default=''))
        else:
            row_collection.addToCollection(
                do.FloatData(i, format_str=', {0}', default='', no_of_dps=3))

    # Add a couple of extra rows to the row_collection for tracking the
    # data in the file.
    row_collection.addToCollection(
        do.StringData('comment', format_str='{0}', default=''))
    row_collection.addToCollection(
        do.StringData('actual_header', format_str='{0}', default=''))
    row_collection.addToCollection(
        do.IntData('row_no', format_str=None, default=''))

    path = datafile.absolutePath()
    try:
        logger.info('Loading data file contents from disc - %s' % (path))
        with open(path, 'rb') as csv_file:
            csv_file = csv.reader(csv_file)

            # Stores the comments found in the file
            comment_lines = []
            first_data_line = False
            line_count = 0

            try:
                # Loop through the contents list loaded from file line-by-line.
                for i, line in enumerate(csv_file, 0):

                    comment = hasCommentOnlyLine(''.join(line), comment_types)
                    if comment or comment == '':
                        comment_lines.append(comment)

                    # If we have a line that isn't a comment or a blank then it
                    # is going to contain materials entries.
                    else:
                        # First non-comment line holds the column headers.
                        if not first_data_line:
                            first_data_line = True
                            _loadHeadData(line, row_collection)
                        else:
                            _loadRowData(line, i, row_collection)

                        row_collection._addValue('row_no', line_count)
                        line_count += 1
                        comment_lines.append(None)
            except IndexError:
                logger.error(
                    'This file is not setup/formatted correctly for a Materials.CSV file:\n'
                    + path)
                raise IndexError(
                    'File is not correctly formatted for a Materials.csv file')
            except AttributeError:
                logger.error(
                    'This file is not setup/formatted correctly for a Materials.CSV file:\n'
                    + path)
                raise AttributeError(
                    'File is not correctly formatted for a Materials.csv file')

    except IOError:
        logger.warning('Cannot load file - IOError')
        raise IOError('Cannot load file at: ' + path)

    # Just need to reset the has_changed variable because it will have been
    # set to True while loading everything in.
    for i in range(len(csv_enum.ITERABLE)):
        row_collection.getDataObject(i).has_changed = False

    return row_collection, comment_lines, subfile_details
Example #16
0
def readBcFile(datafile, args_dict=None):
    """Loads the contents of the BC Database file referenced by datafile.

    Loads the data from the file referenced by the given TuflowFile object into
    a :class:'rowdatacollection' and a list of comment only lines.

    Args:
        datafile(TuflowFile): TuflowFile object with file details.
        args_dict(dict): optional event data. Defaults to None rather than {}
            to avoid the shared mutable default-argument pitfall.

    Return:
        tuple: rowdatacollection, comment_lines(list).

    Raises:
        IOError: if the file cannot be opened.

    See Also:
        :class:'rowdatacollection'.
    """
    if args_dict is None:
        args_dict = {}
    comment_types = ['#', '!']
    bc_enum = dataobj.BcEnum()
    bc_event_data = args_dict

    def _checkHeaders(row, required_headers):
        """Checks that any required headers can be found.

        Reviews the headers in the header row of the csv file to ensure that
        any specifically needed named column headers exist.

        Args:
            row(list): column headers.
            required_headers(list): column names that must be included.

        Return:
            bool: True if all required headers were found, False otherwise.
        """
        # Warn about each header that is actually missing (previously the
        # warning always named the last header iterated, not the missing one).
        missing = [r for r in required_headers if r not in row]
        for r in missing:
            logger.warning('Required header (' + r + ') not ' +
                           'found in file: ' + path)
        return not missing

    def _loadHeadData(row, row_collection, required_headers):
        """Loads the column header data.

        Adds the file defined names for the headers to the rowdatacollection.

        Args:
            row(list): containing the row data.
            row_collection(rowdatacollection): for updating.
            required_headers(list): column names that must exist.

        Return:
            rowdatacollection: updated with header row details.
        """
        row_length = len(row)
        # Result is logged by _checkHeaders; loading continues either way.
        head_check = _checkHeaders(row, required_headers)
        for i, v in enumerate(bc_enum.ITERABLE):
            if i < row_length:
                row_collection._addValue('actual_header', row[i])

        return row_collection

    def _loadRowData(row, row_count, row_collection):
        """Loads the data in a specific row of the file.

        Args:
            row(list): containing the row data.
            row_count(int): the current row number.
            row_collection(rowdatacollection): for updating.

        Return:
            rowdatacollection: updated with the row's details.
        """
        # Trailing comment in the last cell is kept separately.
        if '!' in row[-1] or '#' in row[-1]:
            row_collection._addValue('comment', row[-1])

        # Add the row data in the order that it appears in the file
        # from left to right.
        for i in bc_enum.ITERABLE:
            if i < len(row):
                row_collection._addValue(i, row[i])

        return row_collection

    # Initialise the RowDataObjectCollection with the correct setup.
    # Only the first column omits the leading comma in its format string.
    row_collection = RowDataCollection()
    for i, val in enumerate(bc_enum.ITERABLE):
        if i == 0:
            row_collection.addToCollection(
                do.StringData(i, format_str='{0}', default=''))
        else:
            row_collection.addToCollection(
                do.StringData(i, format_str=', {0}', default=''))

    row_collection.addToCollection(do.StringData('actual_header',
                                                 format_str=', {0}',
                                                 default=''),
                                   index=0)
    row_collection.addToCollection(
        do.IntData('row_no', format_str=None, default=''))

    path = datafile.absolutePath()
    required_headers = ['Name', 'Source']
    try:
        logger.info('Loading data file contents from disc - %s' % (path))
        with open(path, 'rU') as csv_file:
            csv_file = csv.reader(csv_file)

            # Stores the comments found in the file
            comment_lines = []
            first_data_line = False
            row_count = 0
            # Loop through the contents list loaded from file line-by-line.
            for i, line in enumerate(csv_file, 0):

                comment = hasCommentOnlyLine(''.join(line), comment_types)
                if comment or comment == '':
                    comment_lines.append(comment)

                # If we have a line that isn't a comment or a blank then it is
                # going to contain BC database entries.
                else:
                    # First non-comment line holds the column headers.
                    if not first_data_line:
                        first_data_line = True
                        row_collection = _loadHeadData(line, row_collection,
                                                       required_headers)
                    else:
                        row_collection = _loadRowData(line, i, row_collection)
                        row_collection._addValue('row_no', row_count)
                        row_count += 1

                    comment_lines.append(None)

    except IOError:
        logger.warning('Cannot load file - IOError')
        raise IOError('Cannot load file at: ' + path)

    # Just need to reset the has_changed variable because it will have been
    # set to True while loading everything in.
    # NOTE(review): other loaders in this file use getDataObject() here -
    # confirm dataObject() is the intended RowDataCollection accessor.
    for i in range(len(bc_enum.ITERABLE)):
        row_collection.dataObject(i).has_changed = False

    return row_collection, comment_lines
Example #17
0
def readBcFile(datafile, args_dict=None):
    """Loads the contents of the BC Database file referenced by datafile.

    Loads the data from the file referenced by the given TuflowFile object into
    a :class:'rowdatacollection' and a list of comment only lines.

    Args:
        datafile(TuflowFile): TuflowFile object with file details.
        args_dict(dict): optional event data. Defaults to None rather than {}
            to avoid the shared mutable default-argument pitfall.

    Return:
        tuple: rowdatacollection, comment_lines(list).

    Raises:
        IOError: if the file cannot be opened.

    See Also:
        :class:'rowdatacollection'.
    """
    if args_dict is None:
        args_dict = {}
    comment_types = ['#', '!']
    bc_enum = dataobj.BcEnum()
    bc_event_data = args_dict

    def _checkHeaders(row, required_headers):
        """Checks that any required headers can be found.

        Reviews the headers in the header row of the csv file to ensure that
        any specifically needed named column headers exist.

        Args:
            row(list): column headers.
            required_headers(list): column names that must be included.

        Return:
            bool: True if all required headers were found, False otherwise.
        """
        # Warn about each header that is actually missing (previously the
        # warning always named the last header iterated, not the missing one).
        missing = [r for r in required_headers if r not in row]
        for r in missing:
            logger.warning('Required header (' + r + ') not ' +
                           'found in file: ' + path)
        return not missing

    def _loadHeadData(row, row_collection, required_headers):
        """Loads the column header data.

        Adds the file defined names for the headers to the rowdatacollection.

        Args:
            row(list): containing the row data.
            row_collection(rowdatacollection): for updating.
            required_headers(list): column names that must exist.

        Return:
            rowdatacollection: updated with header row details.
        """
        row_length = len(row)
        # Result is logged by _checkHeaders; loading continues either way.
        head_check = _checkHeaders(row, required_headers)
        for i, v in enumerate(bc_enum.ITERABLE):
            if i < row_length:
                row_collection._addValue('actual_header', row[i])

        return row_collection

    def _loadRowData(row, row_count, row_collection):
        """Loads the data in a specific row of the file.

        Args:
            row(list): containing the row data.
            row_count(int): the current row number.
            row_collection(rowdatacollection): for updating.

        Return:
            rowdatacollection: updated with the row's details.
        """
        # Trailing comment in the last cell is kept separately.
        if '!' in row[-1] or '#' in row[-1]:
            row_collection._addValue('comment', row[-1])

        # Add the row data in the order that it appears in the file
        # from left to right.
        for i in bc_enum.ITERABLE:
            if i < len(row):
                row_collection._addValue(i, row[i])

        return row_collection

    # Initialise the RowDataObjectCollection with the correct setup.
    # Only the first column omits the leading comma in its format string.
    row_collection = RowDataCollection()
    for i, val in enumerate(bc_enum.ITERABLE):
        if i == 0:
            row_collection.addToCollection(
                do.StringData(i, format_str='{0}', default=''))
        else:
            row_collection.addToCollection(
                do.StringData(i, format_str=', {0}', default=''))

    row_collection.addToCollection(do.StringData('actual_header',
                                                 format_str=', {0}',
                                                 default=''),
                                   index=0)
    row_collection.addToCollection(
        do.IntData('row_no', format_str=None, default=''))

    path = datafile.absolutePath()
    required_headers = ['Name', 'Source']
    try:
        logger.info('Loading data file contents from disc - %s' % (path))
        with open(path, 'rU') as csv_file:
            csv_file = csv.reader(csv_file)

            # Stores the comments found in the file
            comment_lines = []
            first_data_line = False
            row_count = 0
            # Loop through the contents list loaded from file line-by-line.
            for i, line in enumerate(csv_file, 0):

                comment = hasCommentOnlyLine(''.join(line), comment_types)
                if comment or comment == '':
                    comment_lines.append(comment)

                # If we have a line that isn't a comment or a blank then it is
                # going to contain BC database entries.
                else:
                    # First non-comment line holds the column headers.
                    if not first_data_line:
                        first_data_line = True
                        row_collection = _loadHeadData(line, row_collection,
                                                       required_headers)
                    else:
                        row_collection = _loadRowData(line, i, row_collection)
                        row_collection._addValue('row_no', row_count)
                        row_count += 1

                    comment_lines.append(None)

    except IOError:
        logger.warning('Cannot load file - IOError')
        raise IOError('Cannot load file at: ' + path)

    # Just need to reset the has_changed variable because it will have been
    # set to True while loading everything in.
    # NOTE(review): other loaders in this file use getDataObject() here -
    # confirm dataObject() is the intended RowDataCollection accessor.
    for i in range(len(bc_enum.ITERABLE)):
        row_collection.dataObject(i).has_changed = False

    return row_collection, comment_lines
Example #18
0
    def __init__(self, **kwargs): 
        """Constructor.
        """
        AUnit.__init__(self, **kwargs)
        
        self._unit_type = RefhUnit.UNIT_TYPE
        self._unit_category = RefhUnit.UNIT_CATEGORY
#         self.row_data['main'] = []
        if self._name == 'unknown': self._name = 'Refh_unit'

        # Fill in the header values these contain the data at the top of the
        # section, such as the unit name and labels.
        self.head_data = {
            'revision': HeadDataItem(1, '{:<1}', 0, 0, dtype=dt.INT),
            'comment': HeadDataItem('', '', 0, 1, dtype=dt.STRING),
            'z': HeadDataItem(0.000, '{:>10}', 2, 0, dtype=dt.FLOAT, dps=3),
            'easting': HeadDataItem('', '{:>10}', 2, 1, dtype=dt.STRING), 
            'northing': HeadDataItem('', '{:>10}', 2, 2, dtype=dt.STRING),
            'time_delay': HeadDataItem(0.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3),
            'time_step': HeadDataItem(1.0, '{:>10}', 3, 1, dtype=dt.FLOAT, dps=1),
            'bf_only': HeadDataItem('', '{:>10}', 3, 2, dtype=dt.STRING),
            'sc_flag': HeadDataItem('SCALEFACT', '{:<10}', 3, 3, dtype=dt.CONSTANT, choices=('SCALEFACT', 'PEAKVALUE')),
            'scale_factor': HeadDataItem(1.000, '{:>10}', 3, 4, dtype=dt.FLOAT, dps=3),
            'hydrograph_mode': HeadDataItem('HYDROGRAPH', '{:>10}', 3, 5, dtype=dt.CONSTANT, choices=('HYDROGRAPH', 'HYETOGRAPH')),
            'hydrograph_scaling': HeadDataItem('RUNOFF', '{:>10}', 3, 6, dtype=dt.CONSTANT, choices=('RUNOFF', 'FULL')),
            'min_flow': HeadDataItem(1.000, '{:>10}', 3, 7, dtype=dt.FLOAT, dps=3),
            'catchment_area': HeadDataItem(0.00, '{:>10}', 4, 0, dtype=dt.FLOAT, dps=2),
            'saar': HeadDataItem(0, '{:>10}', 4, 1, dtype=dt.INT),
            'urbext': HeadDataItem(0.000, '{:>10}', 4, 2, dtype=dt.FLOAT, dps=3),
            'season': HeadDataItem('DEFAULT', '{:>10}', 4, 3, dtype=dt.CONSTANT, choices=('DEFAULT', 'WINTER', 'SUMMER')),
            'published_report': HeadDataItem('DLL', '{:>10}', 4, 4, dtype=dt.CONSTANT, choices=('DLL', 'REPORT')),
            
            # Urban - only used if 'urban' == 'URBANREFH'
            'urban': HeadDataItem('', '{:>10}', 4, 5, dtype=dt.CONSTANT, choices=('', 'URBANREFH')),
            'subarea_1': HeadDataItem(0.00, '{:>10}', 5, 0, dtype=dt.FLOAT, dps=2),
            'dplbar_1': HeadDataItem(0.000, '{:>10}', 5, 1, dtype=dt.FLOAT, dps=3),
            'suburbext_1': HeadDataItem(0.000, '{:>10}', 5, 2, dtype=dt.FLOAT, dps=3),
            'calibration_1': HeadDataItem(0.000, '{:>10}', 5, 3, dtype=dt.FLOAT, dps=3),
            'subarea_2': HeadDataItem(0.00, '{:>10}', 6, 0, dtype=dt.FLOAT, dps=2),
            'dplbar_2': HeadDataItem(0.000, '{:>10}', 6, 1, dtype=dt.FLOAT, dps=3),
            'suburbext_2': HeadDataItem(0.000, '{:>10}', 6, 2, dtype=dt.FLOAT, dps=3),
            'calibration_2': HeadDataItem(0.000, '{:>10}', 6, 3, dtype=dt.FLOAT, dps=3),
            'subrunoff_2': HeadDataItem(0.000, '{:>10}', 6, 4, dtype=dt.FLOAT, dps=3),
            'sewer_rp_2': HeadDataItem('RUNOFF', '{:>10}', 6, 5, dtype=dt.CONSTANT, choices=('RUNOFF', 'DEPTH')),
            'sewer_depth_2': HeadDataItem(0.000, '{:>10}', 6, 6, dtype=dt.FLOAT, dps=3),
            'sewer_lossvolume_2': HeadDataItem('VOLUME', '{:>10}', 6, 7, dtype=dt.CONSTANT, choices=('VOLUME', 'FLOW')),
            'subarea_3': HeadDataItem(0.00, '{:>10}', 7, 0, dtype=dt.FLOAT, dps=2),
            'dplbar_3': HeadDataItem(0.000, '{:>10}', 7, 1, dtype=dt.FLOAT, dps=3),
            'suburbext_3': HeadDataItem(0.000, '{:>10}', 7, 2, dtype=dt.FLOAT, dps=3),
            'calibration_3': HeadDataItem(0.000, '{:>10}', 7, 3, dtype=dt.FLOAT, dps=3),
            'subrunoff_3': HeadDataItem(0.000, '{:>10}', 7, 4, dtype=dt.FLOAT, dps=3),
            
            'storm_area': HeadDataItem(0.00, '{:>10}', 8, 0, dtype=dt.FLOAT, dps=2),
            'storm_duration': HeadDataItem(0.000, '{:>10}', 8, 1, dtype=dt.FLOAT, dps=3),
            'sn_rate': HeadDataItem(0.000, '{:>10}', 8, 2, dtype=dt.FLOAT, dps=3),
            'rainfall_flag': HeadDataItem('DESIGN', '{:>10}', 9, 0, dtype=dt.CONSTANT, choices=('DESIGN', 'USER')),
            'arf_flag': HeadDataItem('DESIGN', '{:>10}', 9, 1, dtype=dt.CONSTANT, choices=('DESIGN', 'USER')),
            'rainfall_comment': HeadDataItem('', '', 9, 2, dtype=dt.STRING),
            'rainfall_odepth': HeadDataItem(0.000, '{:>10}', 10, 0, dtype=dt.FLOAT, dps=3),
            'return_period': HeadDataItem(0, '{:>10}', 10, 1, dtype=dt.INT),
            'arf': HeadDataItem(0.000, '{:>10}', 10, 2, dtype=dt.FLOAT, dps=3),
            'c': HeadDataItem(0.000, '{:>10}', 10, 3, dtype=dt.FLOAT, dps=3),
            'd1': HeadDataItem(0.000, '{:>10}', 10, 4, dtype=dt.FLOAT, dps=3),
            'd2': HeadDataItem(0.000, '{:>10}', 10, 5, dtype=dt.FLOAT, dps=3),
            'd2': HeadDataItem(0.000, '{:>10}', 10, 6, dtype=dt.FLOAT, dps=3),
            'd3': HeadDataItem(0.000, '{:>10}', 10, 7, dtype=dt.FLOAT, dps=3),
            'e': HeadDataItem(0.000, '{:>10}', 10, 8, dtype=dt.FLOAT, dps=3),
            'f': HeadDataItem(0.000, '{:>10}', 10, 9, dtype=dt.FLOAT, dps=3),
            'rp_flag': HeadDataItem('DESIGN', '{:>10}', 11, 0, dtype=dt.CONSTANT, choices=('DESIGN', 'USER')),
            'scf_flag': HeadDataItem('DESIGN', '{:>10}', 11, 1, dtype=dt.CONSTANT, choices=('DESIGN', 'USER')),
            'scf': HeadDataItem(0.000, '{:>10}', 11, 2, dtype=dt.FLOAT, dps=3),
            'use_refined_rainfall': HeadDataItem('0', '{:>10}', 11, 3, dtype=dt.CONSTANT, choices=('0', '1')),

            'cmax_flag': HeadDataItem('DESIGN', '{:>10}', 12, 0, dtype=dt.CONSTANT, choices=('DESIGN', 'USER')),
            'cini_flag': HeadDataItem('DESIGN', '{:>10}', 12, 1, dtype=dt.CONSTANT, choices=('DESIGN', 'USER')),
            'alpha_flag': HeadDataItem('DESIGN', '{:>10}', 12, 2, dtype=dt.CONSTANT, choices=('DESIGN', 'USER')),
            'models_comment': HeadDataItem('', '{}', 12, 3, dtype=dt.STRING),
            'cm_dcf': HeadDataItem(0.000, '{:>10}', 13, 0, dtype=dt.FLOAT, dps=3),
            'cmax': HeadDataItem(0.000, '{:>10}', 13, 1, dtype=dt.FLOAT, dps=3),
            'cini': HeadDataItem(0.000, '{:>10}', 13, 2, dtype=dt.FLOAT, dps=3),
            'alpha': HeadDataItem(0.000, '{:>10}', 13, 3, dtype=dt.FLOAT, dps=3),
            'bfihost': HeadDataItem(0.000, '{:>10}', 13, 4, dtype=dt.FLOAT, dps=3),
            'uh_flag': HeadDataItem('DESIGN', '{:>10}', 14, 0, dtype=dt.CONSTANT, choices=('DESIGN', 'USER')),
            'tp_flag': HeadDataItem('DESIGN', '{:>10}', 14, 1, dtype=dt.CONSTANT, choices=('DESIGN', 'USER')),
            'up_flag': HeadDataItem('DESIGN', '{:>10}', 14, 3, dtype=dt.CONSTANT, choices=('DESIGN', 'USER')),
            'uk_flag': HeadDataItem('DESIGN', '{:>10}', 14, 4, dtype=dt.CONSTANT, choices=('DESIGN', 'USER')),
            'tp_dcf': HeadDataItem(0.000, '{:>10}', 15, 0, dtype=dt.FLOAT, dps=3),
            'tp0': HeadDataItem(0.000, '{:>10}', 15, 1, dtype=dt.FLOAT, dps=3),
            'tpt': HeadDataItem(0.000, '{:>10}', 15, 2, dtype=dt.FLOAT, dps=3),
            'dplbar': HeadDataItem(0.000, '{:>10}', 15, 3, dtype=dt.FLOAT, dps=3),
            'dpsbar': HeadDataItem(0.000, '{:>10}', 15, 4, dtype=dt.FLOAT, dps=3),
            'propwet': HeadDataItem(0.000, '{:>10}', 15, 5, dtype=dt.FLOAT, dps=3),
            'up': HeadDataItem(0.000, '{:>10}', 15, 6, dtype=dt.FLOAT, dps=3),
            'uk': HeadDataItem(0.000, '{:>10}', 15, 7, dtype=dt.FLOAT, dps=3),
            'uh_rows': HeadDataItem(0.000, '{:>10}', 16, 0, dtype=dt.INT),
#             'uh_units': HeadDataItem(0.000, '{:>10}', 14, 9, dtype=dt.INT),        # TODO: Find out what the deal with these is
#             'uh_fct': HeadDataItem(0.000, '{:>10}', 14, 10, dtype=dt.FLOAT, dps=3),
            'bl_flag': HeadDataItem('DESIGN', '{:>10}', 17, 0, dtype=dt.CONSTANT, choices=('DESIGN', 'USER')),
            'br_flag': HeadDataItem('DESIGN', '{:>10}', 17, 1, dtype=dt.CONSTANT, choices=('DESIGN', 'USER')),
            'br0_flag': HeadDataItem('DESIGN', '{:>10}', 17, 2, dtype=dt.CONSTANT, choices=('DESIGN', 'USER')),
            'bl_dcf': HeadDataItem(0.000, '{:>10}', 18, 0, dtype=dt.FLOAT, dps=3),
            'bl': HeadDataItem(0.000, '{:>10}', 18, 1, dtype=dt.FLOAT, dps=3),
            'br_dcf': HeadDataItem(0.000, '{:>10}', 18, 2, dtype=dt.FLOAT, dps=3),
            'br': HeadDataItem(0.000, '{:>10}', 18, 3, dtype=dt.FLOAT, dps=3),
            'bf0': HeadDataItem(0.000, '{:>10}', 18, 4, dtype=dt.FLOAT, dps=3),
        }
        
        dobjs = [
            # update_callback is called every time a value is added or updated
            do.FloatData(rdt.RAIN, format_str='{:>10}', default=0, no_of_dps=3)
        ]
        dummy_row = {rdt.RAIN: 0}
        self.row_data['main'] = RowDataCollection.bulkInitCollection(dobjs)
        self.row_data['main'].setDummyRow({rdt.RAIN: 0})
Example #19
0
def readMatCsvFile(datafile, args_dict=None):
    """Loads the contents of the Materials CSV file referenced by datafile.
    
    Loads the data from the file referenced by the given TuflowFile object into
    a :class:'rowdatacollection' and a list of comment only lines.
    
    Args:
        datafile(TuflowFile): TuflowFile object with file details.
        args_dict(dict): optional extra arguments. Currently unread; accepted
            for interface compatibility with the other loader functions.
        
    Return:
        tuple: rowdatacollection, comment_lines(list), subfile_details(dict).
        
    See Also:
        :class:'rowdatacollection'.
    """
    # None sentinel instead of a mutable {} default, which would be shared
    # between all calls of this function.
    if args_dict is None:
        args_dict = {}
    comment_types = ['#', '!']
    csv_enum = dataobj.MatCsvEnum()
    subfile_details = {}

    def _loadHeadData(row, row_collection):
        """Load the file's header row into row_collection.

        A trailing comment (if any) is stored under the 'comment' key. The
        four header values read from file are spread across the 12 column
        slots used by the data rows; the unused slots stay None so the
        header lines up with the data columns.
        """
        new_row = [None] * 12

        if '!' in row[-1] or '#' in row[-1]:
            row_collection._addValue('comment', row[-1])

        new_row[0] = row[0]
        new_row[1] = row[1]
        new_row[9] = row[2]
        new_row[11] = row[3]

        # Store every slot, including the None placeholders.
        for value in new_row:
            row_collection._addValue('actual_header', value)

        return row_collection

    def _disectEntry(col_no, entry, new_row):
        """Breaks the row values into the appropriate object values.
        
        The materials file can have Excel style sub-values. i.e. it can have
        seperate columns defined within a bigger one. This function will break
        those values down into a format usable by the values initiated in the
        rowdatacollection.
        
        Args:
            col_no(int): the current column number.
            entry(string): the value of the current column.
            new_row(list): the row values to update.
            
        Return:
            list containing the updated row values.
        
        Note:
            This isn't very nice. Need to clean it up and find a better, safer
            way of dealing with breaking the row data up. It may be excess work
            but perhaps creating an xml converter could work quite will and
            make dealing with the file a bit easier?
        """
        # Put in ID and Hazard as normal
        if col_no == 0:
            new_row[0] = entry
        elif col_no == 11:
            new_row[11] = entry
        # Possible break up Manning's entry further
        elif col_no == 1:
            # See if there's more than one value in the Manning's category.
            splitval = entry.split(',')

            # If there is and it's numeric then it's a single value for 'n'
            if len(splitval) == 1:
                if uuf.isNumeric(splitval[0]):
                    new_row[1] = splitval[0]

                # Otherwise it's a filename. These can be further separated
                # into two column headers to read from the sub files.
                else:
                    strsplit = splitval[0].split('|')
                    if len(strsplit) == 1:
                        subfile_details[strsplit[0].strip()] = []
                        new_row[6] = strsplit[0].strip()
                    elif len(strsplit) == 2:
                        subfile_details[strsplit[0]] = [strsplit[1].strip()]
                        new_row[6] = strsplit[0].strip()
                        new_row[7] = strsplit[1].strip()
                    else:
                        # NOTE(review): unlike new_row[6], the dict key here
                        # is not strip()ed - confirm whether lookups elsewhere
                        # expect the raw (possibly padded) filename.
                        subfile_details[strsplit[0]] = [strsplit[1].strip(), strsplit[2].strip()]
                        new_row[6] = strsplit[0].strip()
                        new_row[7] = strsplit[1].strip()
                        new_row[8] = strsplit[2].strip()

            # If there's more than one value then it must be the Manning's
            # depth curve values (N1, Y1, N2, Y2).
            else:
                new_row[2] = splitval[0]
                new_row[3] = splitval[1]
                new_row[4] = splitval[2]
                new_row[5] = splitval[3]

        # Finally grab the infiltration parameters (IL, CL)
        elif col_no == 2:
            splitval = entry.split(',')
            new_row[9] = splitval[0]
            new_row[10] = splitval[1]

        return new_row

    def _loadRowData(row, row_count, row_collection):
        """Loads the data in a specific row of the file.
        
        Args:
            row(list): containing the row data.
            row_count(int): the current row number.
            row_collection(rowdatacollection): collection to update.

        Return:
            None: the row_collection is updated in place.
        """
        if '!' in row[-1] or '#' in row[-1]:
            row_collection._addValue('comment', row[-1])
        new_row = [None] * 12

        # Add the row data in the order that it appears in the file
        # from left to right.
        for i in csv_enum.ITERABLE:
            if i < len(row):
                new_row = _disectEntry(i, row[i], new_row)

        for val, item in enumerate(new_row):
            row_collection._addValue(val, item)

    # First entry doesn't want to have a comma in front when formatting.
    row_collection = RowDataCollection()
    types = [1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0]

    # Do the first entry separately because it has a different format string
    row_collection.addToCollection(do.StringData(0, format_str='{0}', default=''))
    for i, t in enumerate(types, 1):
        if t == 0:
            row_collection.addToCollection(do.StringData(i, format_str=', {0}', default=''))
        else:
            row_collection.addToCollection(do.FloatData(i, format_str=', {0}', default='', no_of_dps=3))

    # Add a couple of extra rows to the row_collection for tracking the
    # data in the file.
    row_collection.addToCollection(do.StringData('comment', format_str='{0}', default=''))
    row_collection.addToCollection(do.StringData('actual_header', format_str='{0}', default=''))
    row_collection.addToCollection(do.IntData('row_no', format_str=None, default=''))

    path = datafile.absolutePath()
    try:
        logger.info('Loading data file contents from disc - %s' % (path))
        # NOTE(review): 'rb' + csv.reader is the Python 2 idiom; under
        # Python 3 this would need mode 'r' with newline='' - confirm the
        # target interpreter before changing.
        with open(path, 'rb') as csv_file:
            csv_file = csv.reader(csv_file)

            # Stores the comments found in the file
            comment_lines = []
            first_data_line = False
            line_count = 0

            try:
                # Loop through the contents list loaded from file line-by-line.
                for i, line in enumerate(csv_file, 0):

                    # A blank line ('') is recorded as a comment line too.
                    comment = hasCommentOnlyLine(''.join(line), comment_types)
                    if comment or comment == '':
                        comment_lines.append(comment)

                    # If we have a line that isn't a comment or a blank then it is going
                    # to contain materials entries.
                    else:
                        # First non-comment is the headers
                        if not first_data_line:
                            first_data_line = True
                            _loadHeadData(line, row_collection)
                        else:
                            _loadRowData(line, i, row_collection)

                        row_collection._addValue('row_no', line_count)
                        line_count += 1
                        comment_lines.append(None)
            except IndexError:
                logger.error('This file is not setup/formatted correctly for a Materials.CSV file:\n' + path)
                raise IndexError ('File is not correctly formatted for a Materials.csv file')
            except AttributeError:
                logger.error('This file is not setup/formatted correctly for a Materials.CSV file:\n' + path)
                raise AttributeError ('File is not correctly formatted for a Materials.csv file')

    except IOError:
        logger.warning('Cannot load file - IOError')
        raise IOError ('Cannot load file at: ' + path)

    # Just need to reset the has_changed variable because it will have been
    # set to True while loading everything in.
    for i in range(0, len(csv_enum.ITERABLE)):
        row_collection.getDataObject(i).has_changed = False

    return row_collection, comment_lines, subfile_details
Example #20
0
    def __init__(self, **kwargs):
        """Constructor.

        Args:
            fileOrder (int): The location of this unit in the file.
        """
        super(HtbdyUnit, self).__init__(**kwargs)

        self._unit_type = HtbdyUnit.UNIT_TYPE
        self._unit_category = HtbdyUnit.UNIT_CATEGORY
        self._name = 'Htbd'

        # Units that the time series may be expressed in.
        valid_time_units = (
            'SECONDS', 'MINUTES', 'HOURS', 'DAYS', 'WEEKS', 'FORTNIGHTS',
            'LUNAR MONTHS', 'MONTHS', 'QUARTERS', 'YEARS', 'DECADES',
            'USER SET',
        )

        # NOTE(review): several entries share the same (row, col) location
        # arguments - (0, 1) twice and (2, 0) three times. This looks
        # suspicious; confirm against the HTBDY file format before relying
        # on these positions.
        self.head_data = {
            'comment': HeadDataItem('', '', 0, 1, dtype=dt.STRING),
            'multiplier': HeadDataItem(1.000, '{:>10}', 0, 1, dtype=dt.FLOAT,
                                       dps=3),
            'time_units': HeadDataItem('HOURS', '{:>10}', 2, 0,
                                       dtype=dt.CONSTANT,
                                       choices=valid_time_units),
            'extending_method': HeadDataItem('EXTEND', '{:>10}', 2, 0,
                                             dtype=dt.CONSTANT,
                                             choices=('EXTEND', 'NOEXTEND',
                                                      'REPEAT')),
            'interpolation': HeadDataItem('LINEAR', '{:>10}', 2, 0,
                                          dtype=dt.CONSTANT,
                                          choices=('LINEAR', 'SPLINE')),
        }

        # Row data: the elevation/time pairs that make up the boundary
        # series. TIME is checked to be increasing on every update.
        elevation_obj = do.FloatData(rdt.ELEVATION, format_str='{:>10}',
                                     no_of_dps=3)
        time_obj = do.FloatData(rdt.TIME, format_str='{:>10}', no_of_dps=3,
                                update_callback=self.checkIncreases)
        self.row_data['main'] = RowDataCollection.bulkInitCollection(
            [elevation_obj, time_obj])
        self.row_data['main'].setDummyRow({rdt.TIME: 0, rdt.ELEVATION: 0})
Example #21
0
def readMatSubfile(main_datafile, filename, header_list, args_dict): 
    """Load a subfile referenced by a Materials CSV file.

    Reads the csv subfile at filename (relative to main_datafile.root) into a
    rowdatacollection, recording comment-only lines and locating the columns
    whose headers match those requested in header_list.

    Args:
        main_datafile(TuflowFile): the parent materials file.
        filename(str): subfile name, joined onto the parent file's root.
        header_list(list): up to two column header names to locate.
        args_dict(dict): additional arguments (not read here).

    Return:
        DataFileSubfileMat: the loaded subfile contents.

    Raises:
        IOError: if the file at path cannot be opened.
    """
    comment_types = ['#', '!']
    path = os.path.join(main_datafile.root, filename)
    root = main_datafile.root

    # Headers we need to find in the file; 'None' when not requested.
    header1 = 'None'
    header2 = 'None'
    if len(header_list) > 0:
        header1 = header_list[0]
        if len(header_list) > 1:
            header2 = header_list[1]


    def _scanfile(filepath):
        """Scans the file before we do any loading to identify the contents.
        Need to do this because the file can be setup in so many way that it
        becomes a headache to work it out in advance. Better to take a little
        bit of extra processing time and do some quick checks first.
         
        Arguments:
            file_path (str): the path to the subfile.
        
        Return:
            tuple:
                 list: booleans with whether the column contains
                       data that we want or not.
                 int:  length of the cols list.
                 list: containing all of the first row column data
                 int:  first row with usable data on.
        """ 
        logger.debug('Scanning Materials file - %s' 
                                        % (filepath))

        with open(filepath, 'rb') as csv_file:

            csv_file = csv.reader(csv_file)

            cols = []
            head_list = []
            start_row = -1
            for i, row in enumerate(csv_file, 0):
                # Stop at the first fully blank line.
                if "".join(row).strip() == "":
                    break

                for j, col in enumerate(row, 0):
                    if i == 0:
                        # First row is assumed to hold the headers.
                        cols.append(False)
                        head_list = row
                    elif uuf.isNumeric(col):
                        # Numeric value means this column holds wanted data.
                        cols[j] = True
                        if start_row == -1:
                            start_row = i
                    elif cols[j] == True:
                        break

        return cols, len(cols), head_list, start_row


    def _loadHeadData(row, row_collection, col_length):
        """Load the header row and locate the requested header columns.

        Return:
            tuple(rowdatacollection, int, int): the updated collection plus
                the column indices of header1 and header2 (-1 if not found).
        """
        # Header row is not a comment line.
        comment_lines.append(None)

        head1_location = -1
        head2_location = -1
        row_length = len(row)
        for i in range(0, col_length):
            if i < row_length:
                entry = row[i].strip()
                if entry == header1:
                    head1_location = i
                if entry == header2:
                    head2_location = i
                row_collection._addValue('actual_header', entry)

        return row_collection, head1_location, head2_location


    def _loadRowData(row, row_count, row_collection, comment_lines, col_length, start_row): 
        """Loads the data in a specific row of the file.
        
        Args:
            row(list): containing the row data.
            row_count(int): the current row number.
            row_collection(rowdatacollection): collection to update.
            comment_lines(list): comment tracking list to update.
            col_length(int): number of columns to read.
            start_row(int): first row holding usable data.

        Return:
            tuple(rowdatacollection, list): updated collection and comments.
        """
        # Any lines that aren't headers, but are above the first row to contain
        # actual data will be stored as comment lines
        if row_count < start_row:
            comment_lines.append(row)
            return row_collection, comment_lines
        else:
            comment_lines.append(None)

        if '!' in row[-1] or '#' in row[-1]:
            row_collection._addValue('comment', row[-1])

        # Add the row data in the order that it appears in the file
        # from left to right.
        for i in range(col_length):
            if i < len(row):
                row_collection._addValue(i, row[i])

        return row_collection, comment_lines


    # Default the header locations so a file containing only comment/blank
    # lines doesn't leave them unbound (previously a NameError at the
    # DataFileSubfileMat construction below).
    head1_loc = -1
    head2_loc = -1

    try:
        logger.info('Loading data file contents from disc - %s' % (path))
        # NOTE(review): 'rb' + csv.reader is the Python 2 idiom; Python 3
        # would need mode 'r' with newline=''.
        with open(path, 'rb') as csv_file:
            csv_file = csv.reader(csv_file)

            # Do a quick check of the file setup
            cols, col_length, head_list, start_row = _scanfile(path)

            # First entry doesn't want to have a comma in front when formatting.
            # but all of the others do.
            row_collection = RowDataCollection()
            row_collection.addToCollection(do.FloatData(0, format_str=' {0}', default='', no_of_dps=6))
            for i in range(1, len(cols)):
                if cols[i] == True:
                    row_collection.addToCollection(do.FloatData(i, format_str=', {0}', default='', no_of_dps=6))
                else:
                    row_collection.addToCollection(do.StringData(i, format_str=', {0}', default=''))

            row_collection.addToCollection(do.StringData('actual_header', format_str='{0}', default=''), index=0)
            row_collection.addToCollection(do.IntData('row_no', format_str=None, default=''))

            # Stores the comments found in the file
            comment_lines = []
            first_data_line = False
            # Loop through the contents list loaded from file line-by-line.
            for i, line in enumerate(csv_file, 0):

                # A blank line ('') is recorded as a comment line too.
                comment = hasCommentOnlyLine(''.join(line), comment_types)
                if comment or comment == '':
                    comment_lines.append([comment, i])

                # If we have a line that isn't a comment or a blank then it is going
                # to contain materials entries.
                else:
                    # First non-comment is the headers
                    if not first_data_line:
                        first_data_line = True
                        row_collection, head1_loc, head2_loc = _loadHeadData(line, row_collection, col_length)
                    else:
                        row_collection, comment_lines = _loadRowData(line, i, row_collection, comment_lines, col_length, start_row)

                    row_collection._addValue('row_no', i)

    except IOError:
        logger.warning('Cannot load file - IOError')
        raise IOError ('Cannot load file at: ' + path)

    path_holder = filetools.PathHolder(path, root)
    mat_sub = dataobj.DataFileSubfileMat(path_holder, row_collection, comment_lines, 
                                         path_holder.filename, head1_loc,
                                         head2_loc)
    return mat_sub
Example #22
0
def readTmfFile(datafile):
    """Load the contents of the TMF materials file referenced by datafile.

    Reads the file referenced by the given TuflowFile object into a
    :class:'rowdatacollection', tracking comment-only lines separately.

    Args:
        datafile(TuflowFile): TuflowFile object with file details.

    Return:
        tuple: rowdatacollection, comment_lines(list).

    See Also:
        :class:'rowdatacollection'.
    """
    value_separator = ','
    comment_types = ['#', '!']
    tmf_enum = dataobj.TmfEnum()

    path = datafile.absolutePath()
    value_order = range(11)

    # Column 0 is the material ID; columns 1-10 hold float values.
    row_collection = RowDataCollection()
    row_collection.addToCollection(do.IntData(0, format_str=None, default=''))
    for col in range(1, 11):
        row_collection.addToCollection(
            do.FloatData(col, format_str=', {0}', default='', no_of_dps=3))

    # Extra entries for tracking comments and row numbers.
    row_collection.addToCollection(
        do.StringData('comment', format_str=' ! {0}', default=''))
    row_collection.addToCollection(
        do.IntData('row_no', format_str=None, default=''))

    logger.info('Loading data file contents from disc - %s' % (path))
    contents = _loadFileFromDisc(path)

    # One entry per file line: the comment text for comment/blank lines,
    # None for data lines.
    comment_lines = []

    row_count = 0
    for line in contents:

        comment = hasCommentOnlyLine(line, comment_types)
        # A blank line ('') counts as a comment line too.
        if comment or comment == '':
            comment_lines.append(comment)
        else:
            # Anything else contains materials entries.
            comment_lines.append(None)
            row_collection = _loadRowData(line, row_count, row_collection,
                                          tmf_enum.ITERABLE, comment_types,
                                          value_separator)
            row_count += 1

    # Reset has_changed on the value columns; it will have been set to True
    # while loading everything in.
    for col in value_order:
        row_collection.getDataObject(col).has_changed = False

    return row_collection, comment_lines