Beispiel #1
0
    def build(self, baseline_dir, target_path):
        """
        Build a complete INP file by applying this object's build
        instructions to a baseline model.

        :param baseline_dir: path to the baseline model accepted by
            swmmio.Model
        :param target_path: path of the INP file to be written
        """
        basemodel = swmmio.Model(baseline_dir)
        allheaders = funcs.complete_inp_headers(basemodel.inp.path)
        with open(target_path, 'w') as f:
            for section in allheaders['order']:

                # apply changes only to well-understood, parseable sections
                # for which instructions exist; everything else is copied
                # through verbatim from the baseline
                if (section not in problem_sections
                    and allheaders['headers'][section] != 'blob'
                    and section in self.instructions):

                    # df of baseline model section
                    basedf = create_dataframeINP(basemodel.inp.path, section)

                    # grab the changes recorded for this section
                    changes = self.instructions[section]

                    # drop elements that were altered or tagged for removal
                    # (Index.union replaces the deprecated `|` set-union)
                    remove_ids = changes.removed.index.union(changes.altered.index)
                    new_section = basedf.drop(remove_ids)

                    # re-add the new versions of altered rows plus added rows
                    new_section = pd.concat([new_section, changes.altered, changes.added])
                else:
                    # section is not well understood or is problematic,
                    # just blindly copy it from the baseline
                    new_section = create_dataframeINP(basemodel.inp.path, section=section)

                # write the (possibly modified) section to the new file
                vc_utils.write_inp_section(f, allheaders, section, new_section)
Beispiel #2
0
    def __call__(self):

        """
        collect all useful and available data related to the conduits and
        organize in one dataframe.
        >>> model = swmmio.Model(MODEL_FULL_FEATURES__NET_PATH)
        >>> conduits_section = ModelSection(model, 'conduits')
        >>> conduits_section()
        """

        # create dataframes of relevant sections from the INP, joining
        # each subsequent section onto the first by element name
        for ix, sect in enumerate(self.config['inp_sections']):
            if ix == 0:
                df = create_dataframeINP(self.inp.path, sect, comment_cols=False)
            else:
                df_other = create_dataframeINP(self.inp.path, sect, comment_cols=False)
                df = df.join(df_other)

        # nothing to augment when the model has no elements of this type;
        # also guards the coordinate/astype steps below from KeyErrors
        # (consistent with the other ModelSection.__call__ implementation)
        if df.empty:
            return df

        # if there is an RPT available, grab relevant result sections
        if self.rpt:
            for rpt_sect in self.config['rpt_sections']:
                df = df.join(create_dataframeRPT(self.rpt.path, rpt_sect))

        # add conduit coordinates
        xys = df.apply(lambda r: get_link_coords(r, self.inp.coordinates, self.inp.vertices), axis=1)
        df = df.assign(coords=xys.map(lambda x: x[0]))

        # make inlet/outlet node IDs string type
        df.InletNode = df.InletNode.astype(str)
        df.OutletNode = df.OutletNode.astype(str)

        return df
Beispiel #3
0
    def __call__(self):

        """
        Assemble one DataFrame holding every useful piece of data available
        for this model section: INP data, optional RPT results, and coords.
        >>> model = swmmio.Model(MODEL_FULL_FEATURES__NET_PATH)
        >>> conduits_section = ModelSection(model, 'conduits')
        >>> conduits_section()
        """

        # pull each relevant INP section and join them on element name
        df = None
        for sect in self.config['inp_sections']:
            section_df = create_dataframeINP(self.inp.path, sect, comment_cols=False)
            df = section_df if df is None else df.join(section_df)

        # bail out early when the model has no elements of this type
        if df.empty:
            return df

        # join in simulation results when an RPT file is present
        if self.rpt:
            for rpt_sect in self.config['rpt_sections']:
                rpt_df = create_dataframeRPT(self.rpt.path, rpt_sect)
                df = df.join(rpt_df)

        # attach link geometry
        coords = df.apply(
            lambda row: get_link_coords(row, self.inp.coordinates, self.inp.vertices),
            axis=1)
        df = df.assign(coords=coords.map(lambda pair: pair[0]))

        # inlet/outlet node IDs are handled as strings
        df.InletNode = df.InletNode.astype(str)
        df.OutletNode = df.OutletNode.astype(str)

        return df
Beispiel #4
0
    def conduits(self):
        """
        collect all useful and available data related model conduits and
        organize in one dataframe.

        Joins the [CONDUITS] and [XSECTIONS] INP sections, optionally joins
        the RPT "Link Flow Summary", attaches link coordinates, and derives
        up/downstream inverts and slope from node invert elevations.
        Result is cached on the instance after the first call.

        :return: pandas.DataFrame indexed by conduit name
        """

        # check if this has been done already and return that data accordingly
        if self._conduits_df is not None:
            return self._conduits_df

        # parse out the main objects of this model
        inp = self.inp
        rpt = self.rpt

        # create dataframes of relevant sections from the INP
        conduits_df = create_dataframeINP(inp.path,
                                          "[CONDUITS]",
                                          comment_cols=False)
        xsections_df = create_dataframeINP(inp.path,
                                           "[XSECTIONS]",
                                           comment_cols=False)
        conduits_df = conduits_df.join(xsections_df)

        if rpt:
            # create a dictionary holding data from an rpt file, if provided
            link_flow_df = create_dataframeRPT(rpt.path, "Link Flow Summary")
            conduits_df = conduits_df.join(link_flow_df)

        # add conduit coordinates
        xys = conduits_df.apply(lambda r: get_link_coords(
            r, self.inp.coordinates, self.inp.vertices),
                                axis=1)
        df = conduits_df.assign(coords=xys.map(lambda x: x[0]))

        # add conduit up/down inverts and calculate slope
        # (two left-merges against node inverts: once on the inlet node,
        # once on the outlet node, renaming the merged column each time)
        elevs = self.nodes()[['InvertElev']]
        df = pd.merge(df,
                      elevs,
                      left_on='InletNode',
                      right_index=True,
                      how='left')
        df = df.rename(index=str, columns={"InvertElev": "InletNodeInvert"})
        df = pd.merge(df,
                      elevs,
                      left_on='OutletNode',
                      right_index=True,
                      how='left')
        df = df.rename(index=str, columns={"InvertElev": "OutletNodeInvert"})
        df['UpstreamInvert'] = df.InletNodeInvert + df.InletOffset
        df['DownstreamInvert'] = df.OutletNodeInvert + df.OutletOffset
        # positive slope when the upstream invert is higher
        df['SlopeFtPerFt'] = (df.UpstreamInvert -
                              df.DownstreamInvert) / df.Length

        # normalize node IDs to strings
        df.InletNode = df.InletNode.astype(str)
        df.OutletNode = df.OutletNode.astype(str)

        self._conduits_df = df

        return df
Beispiel #5
0
    def __init__(self,
                 model1=None,
                 model2=None,
                 section='[JUNCTIONS]',
                 build_instr_file=None):
        """
        Record the differences in one INP section between two models, or
        load previously recorded changes from a build instructions file.

        :param model1: baseline model
        :param model2: modified model
        :param section: INP section header to compare, e.g. '[JUNCTIONS]'
        :param build_instr_file: path to an existing build instructions file
        """

        if model1 and model2:
            df1 = create_dataframeINP(model1.inp, section)
            df2 = create_dataframeINP(model2.inp, section)

            #BUG -> this fails if a df1 or df2 is None i.e. if a section doesn't exist in one model
            added_ids = df2.index.difference(df1.index)
            removed_ids = df1.index.difference(df2.index)

            #find where elements were changed (but kept with same ID)
            common_ids = df1.index.difference(
                removed_ids)  #original - removed = in common
            #both dfs concatenated, with matched indices for each element
            #(.loc replaces the long-removed DataFrame.ix indexer)
            full_set = pd.concat([df1.loc[common_ids], df2.loc[common_ids]])
            #drop dupes on the set; anything that did not change keeps 1 row
            changes_with_dupes = full_set.drop_duplicates()
            #duplicated indices are rows that changed; isolate these
            #(Index.get_duplicates() was removed in pandas 1.0)
            dupe_mask = changes_with_dupes.index.duplicated()
            changed_ids = changes_with_dupes.index[dupe_mask].unique()

            #.copy() so the annotation columns below are written into real
            #frames rather than views of df1/df2 (SettingWithCopyWarning)
            added = df2.loc[added_ids].copy()
            added[
                'Comment'] = 'Added'  # from model {}'.format(model2.inp.filePath)
            added['Origin'] = model2.inp.filePath

            altered = df2.loc[changed_ids].copy()
            altered[
                'Comment'] = 'Altered'  # in model {}'.format(model2.inp.filePath)
            altered['Origin'] = model2.inp.filePath

            removed = df1.loc[removed_ids].copy()
            #comment out the removed elements
            #removed.index = ["; " + str(x) for x in removed.index]
            removed[
                'Comment'] = 'Removed'  # in model {}'.format(model2.inp.filePath)
            removed['Origin'] = model2.inp.filePath

            self.old = df1
            self.new = df2
            self.added = added
            self.removed = removed
            self.altered = altered

        if build_instr_file:
            #if generating from a build instructions file, do this (more efficient)
            df = create_dataframeBI(build_instr_file, section=section)

            self.added = df.loc[df['Comment'] == 'Added']
            self.removed = df.loc[df['Comment'] == 'Removed']
            self.altered = df.loc[df['Comment'] == 'Altered']
Beispiel #6
0
    def nodes(self, bbox=None, subset=None):
        """
        collect all useful and available data related model nodes and organize
        in one dataframe.

        Concatenates the [JUNCTIONS], [OUTFALLS] and [STORAGE] INP sections,
        joins RPT depth/flooding summaries when a report file is available,
        and attaches node coordinates. Result is cached on the instance.

        :param bbox: bounding box; the cached frame is reused only when it
            matches ``self.bbox``
        :param subset: currently unused in this implementation
        :return: pandas.DataFrame indexed by node name
        """

        # check if this has been done already and return that data accordingly
        if self._nodes_df is not None and bbox == self.bbox:
            return self._nodes_df

        # parse out the main objects of this model
        inp = self.inp
        rpt = self.rpt

        # create dataframes of relevant sections from the INP
        juncs_df = create_dataframeINP(inp.path, "[JUNCTIONS]")
        outfalls_df = create_dataframeINP(inp.path, "[OUTFALLS]")
        storage_df = create_dataframeINP(inp.path, "[STORAGE]")

        # concatenate the DFs and keep only relevant cols
        all_nodes = pd.concat([juncs_df, outfalls_df, storage_df])
        cols = ['InvertElev', 'MaxDepth', 'SurchargeDepth', 'PondedArea']
        all_nodes = all_nodes[cols]

        if rpt:
            # add results data if a rpt file was found
            depth_summ = create_dataframeRPT(rpt.path, "Node Depth Summary")
            flood_summ = create_dataframeRPT(rpt.path, "Node Flooding Summary")

            # join the rpt data (index on depth df, suffixes for common cols)
            rpt_df = depth_summ.join(flood_summ,
                                     lsuffix='_depth',
                                     rsuffix='_flood')
            all_nodes = all_nodes.join(rpt_df)  # join to the all_nodes df

        all_nodes = all_nodes.join(self.inp.coordinates[['X', 'Y']])

        def nodexy(row):
            # a node missing either coordinate gets coords=None
            if math.isnan(row.X) or math.isnan(row.Y):
                return None
            else:
                return [(row.X, row.Y)]

        xys = all_nodes.apply(lambda r: nodexy(r), axis=1)
        all_nodes = all_nodes.assign(coords=xys)
        all_nodes = all_nodes.rename(index=str)
        self._nodes_df = all_nodes

        return all_nodes
Beispiel #7
0
    def nodes(self, bbox=None, subset=None):
        """
        Assemble one DataFrame holding all available data for the model's
        nodes (junctions, outfalls, storage), plus optional RPT results
        and node coordinates.
        """

        # serve the cached frame when it was built for the same bbox
        if self._nodes_df is not None and bbox == self.bbox:
            return self._nodes_df

        # parse out the main objects of this model
        inp = self.inp
        rpt = self.rpt

        # stack the three node-bearing INP sections into one frame,
        # keeping only the columns the sections share
        frames = [
            create_dataframeINP(inp.path, "[JUNCTIONS]"),
            create_dataframeINP(inp.path, "[OUTFALLS]"),
            create_dataframeINP(inp.path, "[STORAGE]"),
        ]
        all_nodes = pd.concat(frames)
        all_nodes = all_nodes[['InvertElev', 'MaxDepth', 'SurchargeDepth', 'PondedArea']]

        if rpt:
            # pull simulation results and join them in; the suffixes keep
            # same-named columns from the two summaries distinct
            depths = create_dataframeRPT(rpt.path, "Node Depth Summary")
            floods = create_dataframeRPT(rpt.path, "Node Flooding Summary")
            results = depths.join(floods, lsuffix='_depth', rsuffix='_flood')
            all_nodes = all_nodes.join(results)

        all_nodes = all_nodes.join(self.inp.coordinates[['X', 'Y']])

        def xy_pair(row):
            # None when either coordinate is missing
            if math.isnan(row.X) or math.isnan(row.Y):
                return None
            return [(row.X, row.Y)]

        all_nodes = all_nodes.assign(coords=all_nodes.apply(xy_pair, axis=1))
        all_nodes = all_nodes.rename(index=str)
        self._nodes_df = all_nodes

        return all_nodes
Beispiel #8
0
    def conduits(self):
        """
        Get/set conduits section of the INP file.

        :return: Conduits section of the INP file
        :rtype: pandas.DataFrame

        Examples:

        >>> import swmmio
        >>> from swmmio.tests.data import MODEL_FULL_FEATURES__NET_PATH
        >>> model = swmmio.Model(MODEL_FULL_FEATURES__NET_PATH)
        >>> model.inp.conduits[['InletNode', 'OutletNode', 'Length', 'ManningN']]
              InletNode OutletNode  Length  ManningN
        Name
        C1:C2        J1         J2  244.63      0.01
        C2.1         J2         J3  666.00      0.01
        1             1          4  400.00      0.01
        2             4          5  400.00      0.01
        3             5         J1  400.00      0.01
        4             3          4  400.00      0.01
        5             2          5  400.00      0.01
        """
        # parse lazily: reuse the cached frame on subsequent calls
        if self._conduits_df is not None:
            return self._conduits_df
        self._conduits_df = create_dataframeINP(
            self.path, "[CONDUITS]", comment_cols=False)
        return self._conduits_df
Beispiel #9
0
    def weirs(self):
        """
        Assemble one DataFrame of all available data for the model's weirs.
        """

        # reuse the cached frame when available
        if self._weirs_df is not None:
            return self._weirs_df

        # parse out the main objects of this model
        inp = self.inp
        rpt = self.rpt

        # pull the [WEIRS] section; a model without weirs short-circuits
        weirs = create_dataframeINP(inp.path, "[WEIRS]")
        if weirs.empty:
            return pd.DataFrame()

        keep = ['InletNode', 'OutletNode', 'WeirType', 'CrestHeight']
        weirs = weirs[keep]

        # attach link geometry, then normalize node IDs to strings
        coords = weirs.apply(
            lambda row: get_link_coords(row, self.inp.coordinates, self.inp.vertices),
            axis=1)
        result = weirs.assign(coords=coords.map(lambda pair: pair[0]))
        result.InletNode = result.InletNode.astype(str)
        result.OutletNode = result.OutletNode.astype(str)

        self._weirs_df = result

        return result
Beispiel #10
0
    def junctions(self):
        """
        Get/set junctions section of the INP file.

        :return: junctions section of the INP file
        :rtype: pandas.DataFrame

        Examples:

        >>> import swmmio
        >>> from swmmio.tests.data import MODEL_FULL_FEATURES__NET_PATH
        >>> model = swmmio.Model(MODEL_FULL_FEATURES__NET_PATH)
        >>> model.inp.junctions
              InvertElev  MaxDepth  InitDepth  SurchargeDepth  PondedArea
        Name
        J3         6.547        15          0               0           0
        1         17.000         0          0               0           0
        2         17.000         0          0               0           0
        3         16.500         0          0               0           0
        4         16.000         0          0               0           0
        5         15.000         0          0               0           0
        J2        13.000        15          0               0           0
        """
        # parse lazily: reuse the cached frame on subsequent calls
        if self._junctions_df is not None:
            return self._junctions_df
        self._junctions_df = create_dataframeINP(
            self.path, "[JUNCTIONS]", comment_cols=False)
        return self._junctions_df
Beispiel #11
0
def search_for_duplicates(inp_path, verbose=False):
    """
    Scan an INP file and determine whether any element IDs are duplicated
    within any section.

    Method: count the unique index entries of each parseable section and
    compare against the total number of rows.

    :param inp_path: path to the INP file to scan
    :param verbose: when True, print per-section unique/total counts
    :return: True if any section contains duplicated element IDs
    """
    headers = funcs.complete_inp_headers(inp_path)['headers']
    dups_found = False
    for header, cols in headers.items():
        # 'blob' sections are unparsed text and have no element index
        if cols == 'blob':
            continue

        df = dataframes.create_dataframeINP(inp_path, section=header)
        elements = df.index
        n_unique = len(elements.unique())  #number of unique elements
        n_total = len(elements)  #total number of elements
        if verbose:
            print('{} -> (uniques, total) -> ({}, {})'.format(
                header, n_unique, n_total))

        if n_unique != n_total:
            # str() each ID: numeric element names would break ', '.join
            dups = ', '.join(
                str(x) for x in df[df.index.duplicated()].index.unique().tolist())
            print('duplicate found in {}\nsection: {}\n{}'.format(
                inp_path, header, dups))
            dups_found = True

    return dups_found
Beispiel #12
0
    def weirs(self):
        """
        collect all useful and available data related model weirs and
        organize in one dataframe.

        Pulls the [WEIRS] INP section (returning an empty DataFrame when the
        model has none), keeps the core columns, attaches link coordinates,
        and caches the result on the instance.

        :return: pandas.DataFrame indexed by weir name
        """

        # check if this has been done already and return that data accordingly
        if self._weirs_df is not None:
            return self._weirs_df

        # parse out the main objects of this model
        inp = self.inp
        rpt = self.rpt

        # create dataframes of relevant sections from the INP
        weirs_df = create_dataframeINP(inp.path, "[WEIRS]")
        if weirs_df.empty:
            return pd.DataFrame()

        weirs_df = weirs_df[[
            'InletNode', 'OutletNode', 'WeirType', 'CrestHeight'
        ]]

        # add conduit coordinates
        xys = weirs_df.apply(lambda r: get_link_coords(r, self.inp.coordinates,
                                                       self.inp.vertices),
                             axis=1)
        df = weirs_df.assign(coords=xys.map(lambda x: x[0]))
        # normalize node IDs to strings
        df.InletNode = df.InletNode.astype(str)
        df.OutletNode = df.OutletNode.astype(str)

        self._weirs_df = df

        return df
Beispiel #13
0
    def orifices(self):
        """
        Assemble one DataFrame of all available data for the model's orifices.
        """

        # reuse the cached frame when available
        if self._orifices_df is not None:
            return self._orifices_df

        # parse out the main objects of this model
        inp = self.inp
        rpt = self.rpt

        # pull the [ORIFICES] section; a model without orifices short-circuits
        orifices = create_dataframeINP(inp.path, "[ORIFICES]", comment_cols=False)
        if orifices.empty:
            return pd.DataFrame()

        # attach link geometry, then normalize node IDs to strings
        coords = orifices.apply(
            lambda row: get_link_coords(row, self.inp.coordinates, self.inp.vertices),
            axis=1)
        result = orifices.assign(coords=coords.map(lambda pair: pair[0]))
        result.InletNode = result.InletNode.astype(str)
        result.OutletNode = result.OutletNode.astype(str)
        self._orifices_df = result

        return result
Beispiel #14
0
    def orifices(self):
        """
        collect all useful and available data related model orifices and
        organize in one dataframe.

        Pulls the [ORIFICES] INP section (returning an empty DataFrame when
        the model has none), attaches link coordinates, and caches the
        result on the instance.

        :return: pandas.DataFrame indexed by orifice name
        """

        # check if this has been done already and return that data accordingly
        if self._orifices_df is not None:
            return self._orifices_df

        # parse out the main objects of this model
        inp = self.inp
        rpt = self.rpt

        # create dataframes of relevant sections from the INP
        orifices_df = create_dataframeINP(inp.path, "[ORIFICES]", comment_cols=False)
        if orifices_df.empty:
            return pd.DataFrame()

        # add conduit coordinates
        xys = orifices_df.apply(lambda r: get_link_coords(r, self.inp.coordinates, self.inp.vertices), axis=1)
        df = orifices_df.assign(coords=xys.map(lambda x: x[0]))
        # normalize node IDs to strings
        df.InletNode = df.InletNode.astype(str)
        df.OutletNode = df.OutletNode.astype(str)
        self._orifices_df = df

        return df
Beispiel #15
0
    def junctions(self):
        """
        Get/set junctions section of the INP file.

        :return: junctions section of the INP file
        :rtype: pandas.DataFrame

        Examples:

        >>> import swmmio
        >>> from swmmio.tests.data import MODEL_FULL_FEATURES__NET_PATH
        >>> model = swmmio.Model(MODEL_FULL_FEATURES__NET_PATH)
        >>> model.inp.junctions
              InvertElev  MaxDepth  InitDepth  SurchargeDepth  PondedArea
        Name
        J3         6.547        15          0               0           0
        1         17.000         0          0               0           0
        2         17.000         0          0               0           0
        3         16.500         0          0               0           0
        4         16.000         0          0               0           0
        5         15.000         0          0               0           0
        J2        13.000        15          0               0           0
        """
        # parse lazily and cache on first access
        if self._junctions_df is None:
            self._junctions_df = create_dataframeINP(self.path, "[JUNCTIONS]", comment_cols=False)
        return self._junctions_df
Beispiel #16
0
    def conduits(self):
        """
        Get/set conduits section of the INP file.

        :return: Conduits section of the INP file
        :rtype: pandas.DataFrame

        Examples:

        >>> import swmmio
        >>> from swmmio.tests.data import MODEL_FULL_FEATURES__NET_PATH
        >>> model = swmmio.Model(MODEL_FULL_FEATURES__NET_PATH)
        >>> model.inp.conduits[['InletNode', 'OutletNode', 'Length', 'ManningN']]
              InletNode OutletNode  Length  ManningN
        Name
        C1:C2        J1         J2  244.63      0.01
        C2.1         J2         J3  666.00      0.01
        1             1          4  400.00      0.01
        2             4          5  400.00      0.01
        3             5         J1  400.00      0.01
        4             3          4  400.00      0.01
        5             2          5  400.00      0.01
        """
        # parse lazily and cache on first access
        if self._conduits_df is None:
            self._conduits_df = create_dataframeINP(self.path, "[CONDUITS]", comment_cols=False)
        return self._conduits_df
Beispiel #17
0
    def __init__(self, model1=None, model2=None, section='[JUNCTIONS]', build_instr_file=None):
        """
        Record the differences in one INP section between two models, or
        load previously recorded changes from a build instructions file.

        :param model1: baseline swmmio.Model
        :param model2: modified swmmio.Model
        :param section: INP section header to compare, e.g. '[JUNCTIONS]'
        :param build_instr_file: path to an existing build instructions file
        """

        if model1 and model2:
            df1 = create_dataframeINP(model1.inp.path, section)
            df2 = create_dataframeINP(model2.inp.path, section)

            #BUG -> this fails if a df1 or df2 is None i.e. if a section doesn't exist in one model
            added_ids = df2.index.difference(df1.index)
            removed_ids = df1.index.difference(df2.index)

            #find where elements were changed (but kept with same ID)
            common_ids = df1.index.difference(removed_ids) #original - removed = in common
            #both dfs concatenated, with matched indices for each element
            full_set = pd.concat([df1.loc[common_ids], df2.loc[common_ids]])
            #drop dupes on the set, all things that did not change should have 1 row
            changes_with_dupes = full_set.drop_duplicates()
            #duplicated indices are rows that have changes; isolate these
            #(Index.get_duplicates() was removed in pandas 1.0)
            changed_ids = changes_with_dupes.index[
                changes_with_dupes.index.duplicated()].unique()

            added = df2.loc[added_ids].copy()
            added['Comment'] = 'Added'# from model {}'.format(model2.inp.path)
            added['Origin'] = model2.inp.path

            altered = df2.loc[changed_ids].copy()
            altered['Comment'] = 'Altered'# in model {}'.format(model2.inp.path)
            altered['Origin'] = model2.inp.path

            removed = df1.loc[removed_ids].copy()
            #comment out the removed elements
            #removed.index = ["; " + str(x) for x in removed.index]
            removed['Comment'] = 'Removed'# in model {}'.format(model2.inp.path)
            removed['Origin'] = model2.inp.path

            self.old = df1
            self.new = df2
            self.added = added
            self.removed = removed
            self.altered = altered

        if build_instr_file:
            #if generating from a build instructions file, do this (more efficient)
            df = create_dataframeBI(build_instr_file, section = section)

            self.added = df.loc[df['Comment'] == 'Added']
            self.removed = df.loc[df['Comment'] == 'Removed']
            self.altered = df.loc[df['Comment'] == 'Altered']
Beispiel #18
0
 def orifices(self):
     """
     Get/set orifices section of the INP file.
     """
     # parse lazily: reuse the cached frame on subsequent calls
     if self._orifices_df is not None:
         return self._orifices_df
     self._orifices_df = create_dataframeINP(
         self.path, "[ORIFICES]", comment_cols=False)
     return self._orifices_df
Beispiel #19
0
 def polygons(self):
     """
     get/set polygons section of model
     :return: DataFrame of the [Polygons] section
     """
     if self._polygons_df is None:
         # parse lazily on first access and cache the result
         self._polygons_df = create_dataframeINP(
             self.path, '[Polygons]', comment_cols=False)
     return self._polygons_df
Beispiel #20
0
 def weirs(self):
     """
     Get/set weirs section of the INP file.
     """
     # parse lazily: reuse the cached frame on subsequent calls
     if self._weirs_df is not None:
         return self._weirs_df
     self._weirs_df = create_dataframeINP(
         self.path, "[WEIRS]", comment_cols=False)
     return self._weirs_df
Beispiel #21
0
 def vertices(self):
     """
     get/set vertices section of model
     :return: DataFrame of the [VERTICES] section
     """
     if self._vertices_df is None:
         # parse lazily on first access and cache the result
         self._vertices_df = create_dataframeINP(
             self.path, '[VERTICES]', comment_cols=False)
     return self._vertices_df
Beispiel #22
0
 def xsections(self):
     """
     Get/set xsections section of the INP file.
     """
     # parse lazily and cache on first access
     if self._xsections_df is None:
         self._xsections_df = create_dataframeINP(self.path,
                                                  "[XSECTIONS]",
                                                  comment_cols=False)
     return self._xsections_df
Beispiel #23
0
 def coordinates(self):
     """
     Get/set coordinates section of model
     :return: dataframe of model coordinates
     """
     if self._coordinates_df is None:
         # parse lazily on first access and cache the result
         self._coordinates_df = create_dataframeINP(
             self.path, "[COORDINATES]", comment_cols=False)
     return self._coordinates_df
Beispiel #24
0
 def pumps(self):
     """
     Get/set pumps section of the INP file.
     """
     # parse lazily: reuse the cached frame on subsequent calls
     if self._pumps_df is not None:
         return self._pumps_df
     self._pumps_df = create_dataframeINP(
         self.path, "[PUMPS]", comment_cols=False)
     return self._pumps_df
Beispiel #25
0
    def conduits(self):
        """
        Assemble one DataFrame of all available conduit data: [CONDUITS]
        joined with [XSECTIONS], optional RPT flow results, link
        coordinates, and computed inverts/slope.
        """

        # reuse the cached frame when available
        if self._conduits_df is not None:
            return self._conduits_df

        # parse out the main objects of this model
        inp = self.inp
        rpt = self.rpt

        # join the two INP sections that describe conduits
        conduits = create_dataframeINP(inp.path, "[CONDUITS]", comment_cols=False)
        xsects = create_dataframeINP(inp.path, "[XSECTIONS]", comment_cols=False)
        conduits = conduits.join(xsects)

        if rpt:
            # join simulation results when an RPT file is present
            conduits = conduits.join(create_dataframeRPT(rpt.path, "Link Flow Summary"))

        # attach link geometry
        coords = conduits.apply(
            lambda row: get_link_coords(row, self.inp.coordinates, self.inp.vertices),
            axis=1)
        df = conduits.assign(coords=coords.map(lambda pair: pair[0]))

        # look up inlet/outlet node inverts, then derive the conduit's own
        # up/downstream inverts and slope from them
        elevs = self.nodes()[['InvertElev']]
        df = pd.merge(df, elevs, left_on='InletNode', right_index=True, how='left')
        df = df.rename(index=str, columns={"InvertElev": "InletNodeInvert"})
        df = pd.merge(df, elevs, left_on='OutletNode', right_index=True, how='left')
        df = df.rename(index=str, columns={"InvertElev": "OutletNodeInvert"})
        df['UpstreamInvert'] = df.InletNodeInvert + df.InletOffset
        df['DownstreamInvert'] = df.OutletNodeInvert + df.OutletOffset
        df['SlopeFtPerFt'] = (df.UpstreamInvert - df.DownstreamInvert) / df.Length

        # normalize node IDs to strings
        df.InletNode = df.InletNode.astype(str)
        df.OutletNode = df.OutletNode.astype(str)

        self._conduits_df = df

        return df
Beispiel #26
0
 def infiltration(self):
     """
     Get/set infiltration section of the INP file.
     """
     # parse lazily and cache; self._headers supplies the column layout
     if self._infiltration_df is not None:
         return self._infiltration_df
     self._infiltration_df = create_dataframeINP(
         self.path, "[INFILTRATION]", comment_cols=False,
         headers=self._headers)
     return self._infiltration_df
Beispiel #27
0
 def subareas(self):
     """
     Get/set subareas section of the INP file.
     """
     # parse lazily and cache; self._headers supplies the column layout
     if self._subareas_df is not None:
         return self._subareas_df
     self._subareas_df = create_dataframeINP(
         self.path, "[SUBAREAS]", comment_cols=False,
         headers=self._headers)
     return self._subareas_df
Beispiel #28
0
def redefine_time_series(inp):
    """
    Rewrite external TIMESERIES file references in a model's INP so they
    point into the model's own directory, writing the result to a
    '<model>_tmp.inp' copy.

    :param inp: object exposing ``model_dir`` and ``model_name``
    :return: path to the temporary INP to run, or the original INP path
        when the model has no [TIMESERIES] section (KeyError path)
    """

    # Redefining timeseries if these are not in the right directory.
    try:
        with HiddenPrints():
            #Changing the path of the Time series to run models.
            from swmmio.utils import modify_model
            from swmmio.utils.modify_model import replace_inp_section
            from swmmio.utils.dataframes import create_dataframeINP
            # Extracting a section (Timeseries) to a pandas DataFrame

            path = inp.model_dir
            inp_file = inp.model_name
            baseline = path + '/' + inp_file + '.inp'

            # Create a dataframe of the model's time series
            Timeseries = create_dataframeINP(baseline, '[TIMESERIES]')
            New_Timeseries = Timeseries.copy()
            # Modify the path containing the timeseries
            for i in range(len(Timeseries)):
                string = Timeseries.iat[i, 0]

                if '"' in string:  # external-file reference; inline data is untouched
                    # This might be slow if the timeseries are long
                    Rainfile_old = string.split('"')[-2]
                    if '\\' in Rainfile_old:  # absolute/nested path: keep only the filename
                        Rainfile_old = Rainfile_old.split('/')[-1]
                    Rain_name = string.split('"')[-3]
                    Rainfile_new = path + '/' + Rainfile_old
                    # .iat (not chained .iloc[i][0]) so the write lands in the
                    # frame itself rather than a temporary row copy
                    New_Timeseries.iat[i, 0] = Rain_name + '"' + Rainfile_new + '"'
                    print('Rainfile_new: ' + Rainfile_new)
                    print('Rainfile_old: ' + Rainfile_old)
                    print('Rain_name:' + Rain_name)
                else:
                    # mark non-file rows NaN so dropna removes them below
                    New_Timeseries.iat[i, 0] = np.nan
            New_Timeseries.dropna(inplace=True)

            # Create a temporary file with the adjusted path
            new_file = inp_file + '_tmp.inp'
            shutil.copyfile(baseline, path + '/' + new_file)

            #Overwrite the TIMESERIES section of the new model with the
            #adjusted data; widen max_colwidth so long paths are written
            #into the .inp without truncation
            with pd.option_context('display.max_colwidth', 400):
                replace_inp_section(path + '/' + new_file, '[TIMESERIES]',
                                    New_Timeseries)

            model_inp = inp.model_dir + '/' + inp.model_name + '_tmp.inp'
    except KeyError:
        model_inp = inp.model_dir + '/' + inp.model_name + '.inp'

    return model_inp
Beispiel #29
0
def drop_invalid_model_elements(inp):
    """
    Identify references to elements in the model that are undefined and remove them from the
    model. These should coincide with warnings/errors produced by SWMM5 when undefined elements
    are referenced in links, subcatchments, and controls.
    :param inp: swmmio inp object (e.g. swmmio.Model().inp)
    :return: list of IDs of the invalid elements that were dropped
    >>> import swmmio
    >>> from swmmio.tests.data import MODEL_FULL_FEATURES_INVALID
    >>> m = swmmio.Model(MODEL_FULL_FEATURES_INVALID)
    >>> drop_invalid_model_elements(m.inp)
    ['InvalidLink2', 'InvalidLink1']
    >>> m.inp.conduits.index
    Index(['C1:C2', 'C2.1', '1', '2', '4', '5'], dtype='object', name='Name')
    """
    from swmmio.utils.dataframes import create_dataframeINP

    # all node IDs defined in the model: junctions + outfalls + storage
    juncs = create_dataframeINP(inp.path, "[JUNCTIONS]").index.tolist()
    outfs = create_dataframeINP(inp.path, "[OUTFALLS]").index.tolist()
    stors = create_dataframeINP(inp.path, "[STORAGE]").index.tolist()
    nids = juncs + outfs + stors

    # drop links with bad refs to inlet/outlet nodes
    from swmmio.utils.functions import find_invalid_links
    inv_conds = find_invalid_links(inp, nids, 'conduits', drop=True)
    inv_pumps = find_invalid_links(inp, nids, 'pumps', drop=True)
    inv_orifs = find_invalid_links(inp, nids, 'orifices', drop=True)
    inv_weirs = find_invalid_links(inp, nids, 'weirs', drop=True)

    # drop other parts of bad links (their cross-section rows)
    invalid_links = inv_conds + inv_pumps + inv_orifs + inv_weirs
    inp.xsections = inp.xsections.loc[~inp.xsections.index.isin(invalid_links)]

    # drop invalid subcats and their related components
    invalid_subcats = inp.subcatchments.index[~inp.subcatchments['Outlet'].
                                              isin(nids)]
    inp.subcatchments = inp.subcatchments.loc[~inp.subcatchments.index.
                                              isin(invalid_subcats)]
    inp.subareas = inp.subareas.loc[~inp.subareas.index.isin(invalid_subcats)]
    inp.infiltration = inp.infiltration.loc[~inp.infiltration.index.
                                            isin(invalid_subcats)]

    # BUG FIX: invalid_subcats is a pandas Index; `list + Index` performs
    # element-wise addition (or raises) instead of concatenating, so convert
    # to a plain list first. The function then always returns a list of the
    # dropped element IDs, matching the doctest above.
    return invalid_links + invalid_subcats.tolist()
Beispiel #30
0
 def coordinates(self):
     """
     Get/set coordinates section of model
     :return: dataframe of model coordinates
     """
     # parse the [COORDINATES] section on first access, then memoize
     if self._coordinates_df is None:
         self._coordinates_df = create_dataframeINP(
             self.path, "[COORDINATES]", comment_cols=False)
     return self._coordinates_df
Beispiel #31
0
 def polygons(self):
     """
     get/set polygons section of model
     :return: dataframe of model coordinates
     """
     # parse the [Polygons] section on first access, then memoize
     if self._polygons_df is None:
         self._polygons_df = create_dataframeINP(
             self.path, '[Polygons]', comment_cols=False)
     return self._polygons_df
Beispiel #32
0
 def vertices(self):
     """
     get/set vertices section of model
     :return: dataframe of model coordinates
     """
     # parse the [VERTICES] section on first access, then memoize
     if self._vertices_df is None:
         self._vertices_df = create_dataframeINP(
             self.path, '[VERTICES]', comment_cols=False)
     return self._vertices_df
Beispiel #33
0
    def storage(self):
        """
        Get/set storage section of the INP file.

        :return: storage section of the INP file
        :rtype: pandas.DataFrame

        Examples:
        """
        # return the cached frame when available; otherwise parse and cache
        if self._storage_df is not None:
            return self._storage_df
        self._storage_df = create_dataframeINP(
            self.path, "[STORAGE]", comment_cols=False)
        return self._storage_df
Beispiel #34
0
    def files(self):
        """
        Get/set files section of the INP file.

        :return: files section of the INP file
        :rtype: pandas.DataFrame

        Examples:
        """
        # parse [FILES] once and cache; every access hands back a fresh
        # reset-index copy of the cached frame
        if self._files_df is not None:
            return self._files_df.reset_index()
        self._files_df = create_dataframeINP(
            self.path, "[FILES]", comment_cols=False)
        return self._files_df.reset_index()
Beispiel #35
0
    def build(self, baseline_dir, target_path):
        """
        build a complete INP file with the build instructions committed to a
        baseline model.

        :param baseline_dir: directory containing the baseline SWMM model
        :param target_path: path of the new INP file to write
        """
        basemodel = swmmio.Model(baseline_dir)
        # NOTE(review): complete_inp_headers is given inp.filePath here while
        # create_dataframeINP below is given the inp object itself — confirm
        # both argument forms are valid for this swmmio version (a sibling
        # example of this method passes inp.path to both).
        allheaders = funcs.complete_inp_headers(basemodel.inp.filePath)
        #new_inp = os.path.join(target_dir, 'model.inp')
        with open(target_path, 'w') as f:
            for section in allheaders['order']:

                #check if the section is not in problem_sections and there are changes
                #in self.instructions and commit changes to it from baseline accordingly
                if (section not in problem_sections
                        and allheaders['headers'][section] != 'blob'
                        and section in self.instructions):

                    #df of baseline model section
                    basedf = create_dataframeINP(basemodel.inp, section)

                    #grab the changes to
                    changes = self.instructions[section]

                    #remove elements that have alterations and or tagged for removal
                    # (union of the removed and altered element IDs)
                    remove_ids = changes.removed.index | changes.altered.index
                    new_section = basedf.drop(remove_ids)

                    #add elements
                    new_section = pd.concat(
                        [new_section, changes.altered, changes.added])
                else:
                    #section is not well understood or is problematic, just blindly copy
                    new_section = create_dataframeINP(basemodel.inp,
                                                      section=section)

                #write the section
                vc_utils.write_inp_section(f, allheaders, section, new_section)
Beispiel #36
0
    def options(self):
        """
        Get/set options section of the INP file.

        :return: options section of the INP file
        :rtype: pandas.DataFrame

        Examples:
        """
        # return the cached frame when available; otherwise parse and cache
        if self._options_df is not None:
            return self._options_df
        self._options_df = create_dataframeINP(
            self.path, "[OPTIONS]", comment_cols=False, headers=self._headers)
        return self._options_df
Beispiel #37
0
    def subcatchments(self):
        """
        Get/set subcatchments section of the INP file.

        :return: subcatchments section of the INP file
        :rtype: pandas.DataFrame

        Examples:
        """
        # return the cached frame when available; otherwise parse and cache
        if self._subcatchments_df is not None:
            return self._subcatchments_df
        self._subcatchments_df = create_dataframeINP(
            self.path, "[SUBCATCHMENTS]", comment_cols=False,
            headers=self._headers)
        return self._subcatchments_df
Beispiel #38
0
    def outfalls(self):
        """
        Get/set outfalls section of the INP file.

        :return: outfalls section of the INP file
        :rtype: pandas.DataFrame

        Examples:

        >>> import swmmio
        >>> from swmmio.tests.data import MODEL_FULL_FEATURES__NET_PATH
        >>> model = swmmio.Model(MODEL_FULL_FEATURES__NET_PATH)
        >>> model.inp.outfalls
              InvertElev OutfallType StageOrTimeseries  TideGate
        Name
        J4             0        FREE                NO       NaN
        """
        # return the cached frame when available; otherwise parse and cache
        if self._outfalls_df is not None:
            return self._outfalls_df
        self._outfalls_df = create_dataframeINP(
            self.path, "[OUTFALLS]", comment_cols=False)
        return self._outfalls_df
Beispiel #39
0
    def subcatchments(self):
        """
        collect all useful and available data related subcatchments and organize
        in one dataframe.

        :return: pandas.DataFrame of subcatchment attributes, joined with the
            runoff summary results when a report (RPT) file is available
        """
        subs = create_dataframeINP(self.inp.path, "[SUBCATCHMENTS]")
        subs = subs.drop([';', 'Comment', 'Origin'], axis=1)
        # NOTE(review): polygons_df is never used below — presumably accessed
        # only for the lazy-loading side effect of the property, or dead code;
        # confirm before removing.
        polygons_df = self.inp.polygons

        if self.rpt:
            # join simulation results from the report file onto the attributes
            flw = create_dataframeRPT(self.rpt.path,
                                      'Subcatchment Runoff Summary')
            subs = subs.join(flw)

            # more accurate runoff calculations
            # runoff depth (in) / 12 = ft, times area = volume in acre-feet
            subs['RunoffAcFt'] = subs.TotalRunoffIn / 12.0 * subs.Area
            # acre-feet -> million gallons (1 MG ≈ 3.06888785 ac-ft)
            subs['RunoffMGAccurate'] = subs.RunoffAcFt / 3.06888785

        self._subcatchments_df = subs

        return subs
Beispiel #40
0
    def subcatchments(self):
        """
        collect all useful and available data related subcatchments and organize
        in one dataframe.
        """
        # base attributes from the INP file, minus bookkeeping columns
        df = create_dataframeINP(self.inp.path, "[SUBCATCHMENTS]")
        df = df.drop([';', 'Comment', 'Origin'], axis=1)
        polygons_df = self.inp.polygons

        if self.rpt:
            # append simulation results when a report file is present
            runoff_summary = create_dataframeRPT(
                self.rpt.path, 'Subcatchment Runoff Summary')
            df = df.join(runoff_summary)

            # more accurate runoff calculations:
            # inches of runoff -> acre-feet, then acre-feet -> million gallons
            df['RunoffAcFt'] = df.TotalRunoffIn / 12.0 * df.Area
            df['RunoffMGAccurate'] = df.RunoffAcFt / 3.06888785

        self._subcatchments_df = df

        return df
Beispiel #41
0
    def outfalls(self):
        """
        Get/set outfalls section of the INP file.

        :return: outfalls section of the INP file
        :rtype: pandas.DataFrame

        Examples:

        >>> import swmmio
        >>> from swmmio.tests.data import MODEL_FULL_FEATURES__NET_PATH
        >>> model = swmmio.Model(MODEL_FULL_FEATURES__NET_PATH)
        >>> model.inp.outfalls
              InvertElev OutfallType StageOrTimeseries  TideGate
        Name
        J4             0        FREE                NO       NaN
        """
        # lazily parse the [OUTFALLS] section; later calls hit the cache
        if self._outfalls_df is not None:
            return self._outfalls_df
        parsed = create_dataframeINP(self.path, "[OUTFALLS]",
                                     comment_cols=False)
        self._outfalls_df = parsed
        return parsed
Beispiel #42
0
def search_for_duplicates(inp_path, verbose=False):
    """
    scan an inp file and determine if any element IDs are duplicated in
    any section. Method: count the uniques and compare to total length
    """
    headers = funcs.complete_inp_headers(inp_path)['headers']
    dups_found = False
    for header, cols in headers.items():
        # blob sections have no parsed element IDs to compare
        if cols == 'blob':
            continue

        df = dataframes.create_dataframeINP(inp_path, section=header)
        ids = df.index
        unique_count = len(ids.unique())  # number of unique elements
        total_count = len(ids)  # total number of elements
        if verbose:
            print('{} -> (uniques, total) -> ({}, {})'.format(
                header, unique_count, total_count))

        if unique_count != total_count:
            dups = ', '.join(df[df.index.duplicated()].index.unique().tolist())
            print('duplicate found in {}\nsection: {}\n{}'.format(
                inp_path, header, dups))
            dups_found = True

    return dups_found
Beispiel #43
0
def generate_inp_from_diffs(basemodel, inpdiffs, target_dir):
    """
    create a new inp with respect to a baseline inp and changes instructed
    with a list of inp diff files (build instructions). This saves having to
    recalculate the differences of each model from the baseline whenever we want
    to combine versions.

    :param basemodel: baseline swmmio.Model whose inp is the starting point
    :param inpdiffs: list of paths to inp diff (build instruction) files
    :param target_dir: directory where build_instructions.txt and new.inp
        are written
    """

    #step 1 --> combine the diff/build instructions
    allheaders = funcs.complete_inp_headers(basemodel.inp.filePath)
    combi_build_instr_file = os.path.join(target_dir, 'build_instructions.txt')
    newinp = os.path.join(target_dir, 'new.inp')
    with open(combi_build_instr_file, 'w') as f:
        for header in allheaders['order']:
            s = ''
            section_header_written = False
            for inp in inpdiffs:
                sect_s = None
                if not section_header_written:
                    # first diff file contributes the section header line(s)
                    sect_s = text.extract_section_from_inp(inp,
                                                           header,
                                                           cleanheaders=False,
                                                           return_string=True,
                                                           skipheaders=False)
                    section_header_written = True

                else:
                    # subsequent diff files contribute data rows only
                    sect_s = text.extract_section_from_inp(inp,
                                                           header,
                                                           cleanheaders=False,
                                                           return_string=True,
                                                           skipheaders=True)

                if sect_s:
                    #remove the extra space between data in the same table
                    #coming from diffrent models.
                    if sect_s[-2:] == '\n\n':  #NOTE Check this section...
                        s += sect_s[:-1]
                    else:
                        s += sect_s

            f.write(s + '\n')

    #step 2 --> clean up the new combined diff instructions
    df_dict = clean_inp_diff_formatting(
        combi_build_instr_file)  #makes more human readable

    #step 3 --> create a new inp based on the baseline, with the inp_diff
    #instructions applied
    with open(newinp, 'w') as f:
        for section in allheaders['order']:
            # BUG FIX: `print section` was Python 2 statement syntax; the rest
            # of this file uses the Python 3 print() function.
            print(section)
            if section not in problem_sections and allheaders['headers'][
                    section] != 'blob':
                #check if a changes from baseline spreadheet exists, and use this
                #information if available to create the changes array
                df = create_dataframeINP(basemodel.inp, section)
                df['Origin'] = ''  #add the origin column if not there
                if section in df_dict:
                    # apply the build instructions: drop removed/altered rows,
                    # then re-add the added/altered rows from the diff
                    df_change = df_dict[section]
                    ids_to_drop = df_change.loc[df_change['Comment'].isin(
                        ['Removed', 'Altered'])].index
                    df = df.drop(ids_to_drop)
                    df = df.append(df_change.loc[df_change['Comment'].isin(
                        ['Added', 'Altered'])])
                new_section = df
            else:
                #blindly copy this section from the base model
                new_section = create_dataframeINP(basemodel.inp,
                                                  section=section)

            #write the section into the inp file and the excel file
            vc_utils.write_inp_section(f, allheaders, section, new_section)
Beispiel #44
0
def generate_inp_from_diffs(basemodel, inpdiffs, target_dir):
    """
    create a new inp with respect to a baseline inp and changes instructed
    with a list of inp diff files (build instructions). This saves having to
    recalculate the differences of each model from the baseline whenever we want
    to combine versions.

    NOTE THIS ISN'T USED ANYWHERE. DELETE ????

    :param basemodel: baseline swmmio.Model whose inp is the starting point
    :param inpdiffs: list of paths to inp diff (build instruction) files
    :param target_dir: directory where build_instructions.txt and new.inp
        are written
    """

    #step 1 --> combine the diff/build instructions
    allheaders = funcs.complete_inp_headers(basemodel.inp.path)
    combi_build_instr_file = os.path.join(target_dir, 'build_instructions.txt')
    newinp = os.path.join(target_dir, 'new.inp')
    with open(combi_build_instr_file, 'w') as f:
        for header in allheaders['order']:
            s = ''
            section_header_written = False
            for inp in inpdiffs:
                sect_s = None
                if not section_header_written:
                    # first diff file contributes the section header line(s)
                    sect_s = text.extract_section_from_inp(inp, header,
                                                           cleanheaders=False,
                                                           return_string=True,
                                                           skipheaders=False)
                    section_header_written = True

                else:
                    # subsequent diff files contribute data rows only
                    sect_s = text.extract_section_from_inp(inp, header,
                                                           cleanheaders=False,
                                                           return_string=True,
                                                           skipheaders=True)

                if sect_s:
                    #remove the extra space between data in the same table
                    #coming from diffrent models.
                    if sect_s[-2:] == '\n\n':   #NOTE Check this section...
                        s += sect_s[:-1]
                    else:
                        s += sect_s

            f.write(s + '\n')

    #step 2 --> clean up the new combined diff instructions
    # BUG FIX: this assignment was commented out, but df_dict is referenced in
    # step 3 below, which raised a NameError at runtime. Restore it.
    df_dict = clean_inp_diff_formatting(combi_build_instr_file) #makes more human readable

    #step 3 --> create a new inp based on the baseline, with the inp_diff
    #instructions applied
    with open(newinp, 'w') as f:
        for section in allheaders['order']:
            print(section)
            if section not in problem_sections and allheaders['headers'][section] != 'blob':
                #check if a changes from baseline spreadheet exists, and use this
                #information if available to create the changes array
                df = create_dataframeINP(basemodel.inp.path, section)
                df['Origin'] = '' #add the origin column if not there
                if section in df_dict:
                    # drop removed/altered rows, then append added/altered rows
                    df_change = df_dict[section]
                    ids_to_drop = df_change.loc[df_change['Comment'].isin(['Removed', 'Altered'])].index
                    df = df.drop(ids_to_drop)
                    df = df.append(df_change.loc[df_change['Comment'].isin(['Added', 'Altered'])])
                new_section = df
            else:
                #blindly copy this section from the base model
                new_section = create_dataframeINP(basemodel.inp.path, section=section)

            #write the section into the inp file and the excel file
            vc_utils.write_inp_section(f, allheaders, section, new_section)
Beispiel #45
0
    def __init__(self,
                 model1=None,
                 model2=None,
                 section='[JUNCTIONS]',
                 build_instr_file=None):
        """
        Compute the differences in one INP section between two models, or load
        pre-computed differences from a build instructions file.

        Populates self.added / self.removed / self.altered dataframes (and,
        in the two-model case, self.old / self.new).

        :param model1: baseline swmmio model (compared against model2)
        :param model2: updated swmmio model
        :param section: INP section header to diff, e.g. '[JUNCTIONS]'
        :param build_instr_file: path to an existing build instructions file;
            when given, the change sets are read directly from it
        """

        if model1 and model2:
            df1 = create_dataframeINP(model1.inp.path, section)
            df2 = create_dataframeINP(model2.inp.path, section)
            # tag for the Origin column; spaces replaced so the tag stays one token
            m2_origin_string = os.path.basename(model2.inp.path).replace(
                ' ', '-')

            # BUG -> this fails if a df1 or df2 is None i.e. if a section doesn't exist in one model
            added_ids = df2.index.difference(df1.index)
            removed_ids = df1.index.difference(df2.index)

            # find where elements were changed (but kept with same ID)
            common_ids = df1.index.difference(
                removed_ids)  # original - removed = in common
            # both dfs concatenated, with matched indices for each element
            full_set = pd.concat([df1.loc[common_ids], df2.loc[common_ids]])
            # remove whitespace
            full_set = full_set.apply(lambda x: x.str.strip()
                                      if x.dtype == "object" else x)
            # drop dupes on the set, all things that did not changed should have 1 row
            changes_with_dupes = full_set.drop_duplicates()
            # duplicate indicies are rows that have changes, isolate these
            # idx[idx.duplicated()].unique()
            changed_ids = changes_with_dupes.index[
                changes_with_dupes.index.duplicated()].unique(
                )  # .get_duplicates()

            added = df2.loc[added_ids].copy()
            added[
                'Comment'] = 'Added'  # from model {}'.format(model2.inp.path)
            added['Origin'] = m2_origin_string

            altered = df2.loc[changed_ids].copy()
            altered[
                'Comment'] = 'Altered'  # in model {}'.format(model2.inp.path)
            altered['Origin'] = m2_origin_string

            removed = df1.loc[removed_ids].copy()
            # comment out the removed elements
            # removed.index = ["; " + str(x) for x in removed.index]
            removed[
                'Comment'] = 'Removed'  # in model {}'.format(model2.inp.path)
            removed['Origin'] = m2_origin_string

            self.old = df1
            self.new = df2
            self.added = added
            self.removed = removed
            self.altered = altered

        if build_instr_file:
            # if generating from a build instructions file, do this (more efficient)
            df = create_dataframeBI(build_instr_file, section=section)

            # rows in the BI file are pre-tagged by change type in 'Comment'
            self.added = df.loc[df['Comment'] == 'Added']
            self.removed = df.loc[df['Comment'] == 'Removed']
            self.altered = df.loc[df['Comment'] == 'Altered']