def create_inp_build_instructions(inpA, inpB, path, filename, comments=''):
    """
    Compare two INP files section by section and persist the differences as a
    build-instructions (BI) text file.

    The BI file records what was removed, added, or altered in each INP
    section relative to the baseline, so the alternative model can later be
    rebuilt from the baseline reference model.

    Note: this should be split into a func that creates an overall model
    "diff" that can then be written as a BI file or used programmatically.

    :param inpA: path to the baseline INP file
    :param inpB: path to the alternative INP file
    :param path: directory in which the BI file is written (created if absent)
    :param filename: BI file name without extension; also used as the log key
    :param comments: optional log message stored in the BI metadata
    """
    # sections whose diffs are not reliable; skipped entirely
    skip_sections = {'[TITLE]', '[CURVES]', '[TIMESERIES]', '[RDII]', '[HYDROGRAPHS]'}

    headers = funcs.complete_inp_headers(inpA)
    model_base = swmmio.Model(inpA)
    model_alt = swmmio.Model(inpB)

    # ensure the build instructions folder exists
    if not os.path.exists(path):
        os.makedirs(path)
    bi_path = os.path.join(path, filename) + '.txt'

    with open(bi_path, 'w') as bi_file:
        # metadata: the two parent models (with modification dates) and a log entry
        meta = {
            'Parent Models': {
                'Baseline': {inpA: vc_utils.modification_date(inpA)},
                'Alternatives': {inpB: vc_utils.modification_date(inpB)}
            },
            'Log': {filename: comments}
        }
        vc_utils.write_meta_data(bi_file, meta)

        for section in headers['order']:
            if section in skip_sections:
                continue
            # collect the changes found in the current section
            diff = INPDiff(model_base, model_alt, section)
            section_changes = pd.concat([diff.removed, diff.added, diff.altered])
            # na_fill works around blank spaces in the SNOWPACK section
            vc_utils.write_inp_section(bi_file, headers, section, section_changes,
                                       pad_top=False, na_fill='NaN')
def test_model_to_networkx():
    """The model's network graph exposes link lengths and node attributes."""
    m = swmmio.Model(MODEL_FULL_FEATURES__NET_PATH)
    G = m.network
    assert(G['J2']['J3']['C2.1']['Length'] == 666)
    assert(G['J1']['J2']['C1:C2']['Length'] == 244.63)
    # Graph.node was removed in networkx 2.4; Graph.nodes is the
    # forward-compatible accessor (available since networkx 2.0)
    assert(round(G.nodes['J2']['InvertElev'], 3) == 13.392)
def test_nodes_dataframe():
    """Node index and selected attributes match the MODEL_XSECTION_ALT_01 file."""
    model = swmmio.Model(MODEL_XSECTION_ALT_01)
    nodes_df = model.nodes()

    expected_ids = ['dummy_node%d' % i for i in range(1, 7)] + ['dummy_outfall']
    assert(list(nodes_df.index) == expected_ids)

    # (node id, attribute, expected value) spot checks
    expected_values = [
        ('dummy_node1', 'InvertElev', -10.99),
        ('dummy_node2', 'MaxDepth', 20),
        ('dummy_node3', 'X', -4205.457),
        ('dummy_node4', 'MaxDepth', 12.59314),
        ('dummy_node5', 'PondedArea', 73511),
    ]
    for node_id, attr, expected in expected_values:
        assert(nodes_df.loc[node_id, attr] == expected)
def read_report_dir(rptdir, total_parcel_count=0):
    """
    Read pre-processed flood report data from a report directory.

    :param rptdir: directory holding the pre-processed report files
        (parcel_flood_comparison.csv, and optionally cost_estimate.csv
        and new_conduits.json)
    :param total_parcel_count: total parcel count for the study area
    :return: populated FloodReport instance
    """
    rpt = FloodReport()
    rpt.total_parcel_count = total_parcel_count
    # the model lives in the parent directory of the report directory
    rpt.model = swmmio.Model(os.path.dirname(rptdir))
    rpt.scenario = rpt.model.scenario
    rpt.parcel_flooding = pd.read_csv(
        os.path.join(rptdir, 'parcel_flood_comparison.csv'))
    rpt.parcel_hrs_flooded = rpt.parcel_flooding.HoursFloodedProposed.sum()
    rpt.parcel_vol_flooded = rpt.parcel_flooding.TotalFloodVolProposed.sum()

    costcsv = os.path.join(rptdir, 'cost_estimate.csv')
    conduits_geojson_path = os.path.join(rptdir, 'new_conduits.json')

    if os.path.exists(costcsv):
        # cost estimate total, reported in millions
        cost_df = pd.read_csv(costcsv)
        rpt.cost_estimate = cost_df.TotalCostEstimate.sum() / 1e6
    if os.path.exists(conduits_geojson_path):
        with open(conduits_geojson_path, 'r') as f:
            rpt.new_conduits_geojson = geojson.loads(f.read())

    return rpt
def build(self, baseline_dir, target_path):
    """
    Build a complete INP file from the build instructions committed against
    a baseline model.

    :param baseline_dir: directory of the baseline model
    :param target_path: path where the rebuilt INP file is written
    """
    basemodel = swmmio.Model(baseline_dir)
    allheaders = funcs.complete_inp_headers(basemodel.inp.filePath)
    with open(target_path, 'w') as f:
        for section in allheaders['order']:
            # apply changes from self.instructions only when the section is
            # well understood (not problematic, not a blob) and has changes
            if (section not in problem_sections
                    and allheaders['headers'][section] != 'blob'
                    and section in self.instructions):
                # df of baseline model section
                basedf = create_dataframeINP(basemodel.inp, section)
                # grab the changes to apply
                changes = self.instructions[section]
                # drop elements that were altered or tagged for removal;
                # Index.union() replaces the deprecated `|` set operation,
                # whose meaning changed in modern pandas
                remove_ids = changes.removed.index.union(changes.altered.index)
                new_section = basedf.drop(remove_ids)
                # add the altered (updated) and newly added elements
                new_section = pd.concat(
                    [new_section, changes.altered, changes.added])
            else:
                # section is not well understood or is problematic,
                # just blindly copy it from the baseline
                new_section = create_dataframeINP(basemodel.inp, section=section)
            # write the section
            vc_utils.write_inp_section(f, allheaders, section, new_section)
def test_conduits_dataframe():
    """The full-features model parses exactly one conduit, C1:C2."""
    model = swmmio.Model(MODEL_FULL_FEATURES_PATH)
    conduit_ids = list(model.conduits().index)
    assert(conduit_ids == ['C1:C2'])
from shutil import copyfile
from get_contributing_area import get_upstream_nodes
from swmmio import swmmio

# Trim a copy of the Hague model: remove every line that references a node
# draining to one of the non-important outfalls.
model_input_file = "../hague_model/v2014_Hague_EX_10yr_MHHW_mod2_trim.inp"
model_input_file_tmp = model_input_file.replace(".inp", "_tmp.inp")
# reuse the precomputed tmp path (was recomputed with a duplicated .replace())
copyfile(model_input_file, model_input_file_tmp)

mymodel = swmmio.Model(model_input_file)
nodes = mymodel.nodes()
cons = mymodel.conduits()
subs = mymodel.subcatchments()

non_important_outfalls = ['D14200', 'D143000', 'D14860', 'D1489', 'D14240',
                          'D14153', 'D14110', 'E14310', 'E145200', 'E14330',
                          'D14165', 'D14124', 'D14300']

# every node upstream of a non-important outfall (plus the outfall itself)
# is not relevant and should be stripped from the file
non_rel_nodes = []
for out in non_important_outfalls:
    us_nodes = get_upstream_nodes(out, cons)
    non_rel_nodes.extend(us_nodes)
    non_rel_nodes.append(out)

# keep only lines that mention none of the non-relevant node ids
# NOTE(review): this is substring matching; it also drops lines whose ids
# merely contain a non-relevant id -- confirm ids are not prefixes of others
with open(model_input_file_tmp, 'r') as inpfile:
    relevant_lines = [line for line in inpfile
                      if all(node not in line for node in non_rel_nodes)]

with open(model_input_file_tmp, 'w') as inpfile:
    inpfile.writelines(relevant_lines)
def replace_inp_section(inp_path, modified_section_header, new_data, overwrite=True):
    """
    Modify an existing model by passing in new data (pandas DataFrame) and
    the section header that should be modified. This function will overwrite
    all data in the old section with the passed data.

    :param inp_path: path to the INP file to modify
    :param modified_section_header: header string of the section to replace,
        e.g. '[JUNCTIONS]'
    :param new_data: DataFrame written in place of the old section's data
    :param overwrite: if True, replace the original file with the modified
        copy; if False, the '_mod.inp' temp file is left beside the original
    :return: swmmio.Model built from inp_path
        NOTE(review): when overwrite=False this re-reads the UNmodified
        original file -- confirm that is intended
    """
    # temp file written next to the original, suffixed with '_mod.inp'
    tmpfilename = os.path.splitext(os.path.basename(inp_path))[0] + '_mod.inp'
    wd = os.path.dirname(inp_path)
    tmpfilepath = os.path.join(os.path.dirname(inp_path), tmpfilename)
    allheaders = complete_inp_headers(inp_path)
    basemodel = swmmio.Model(inp_path)

    with open(inp_path) as oldf:
        with open(tmpfilepath, 'w') as new:
            # write each line as-is from the original model until we find the
            # header of the section we wish to overwrite
            found_section = False
            found_next_section = False
            for line in oldf:
                if modified_section_header in line:
                    # write the replacement data in the new file now
                    vc_utils.write_inp_section(new, allheaders,
                                               modified_section_header,
                                               new_data, pad_top=False)
                    found_section = True
                # detect the header of the section FOLLOWING the replaced one
                # so original-line copying can resume from there
                if (found_section and not found_next_section
                        and line.strip() in allheaders['headers']
                        and modified_section_header != line.strip()):
                    found_next_section = True
                    new.write('\n\n')  # add some space before the next section
                if found_next_section or not found_section:
                    # write the lines from the original file
                    # if we haven't found the section to modify,
                    # or if we have found the section and we've found the NEXT
                    # section -- continue writing original file's lines
                    new.write(line)
            if not found_section:
                # the header doesn't exist in the old model,
                # so we should append it to the bottom of file
                vc_utils.write_inp_section(new, allheaders,
                                           modified_section_header, new_data)

    # rename files and remove old if we should overwrite
    if overwrite:
        os.remove(inp_path)
        os.rename(tmpfilepath, inp_path)

    return swmmio.Model(inp_path)
import numpy as np
from swmmio import swmmio
from swmmio.utils.modify_model import replace_inp_section
from get_contributing_area import get_upstream_nodes, get_upstream_conduits
import pandas as pd
from shutil import copyfile


def make_new_df(index, cols, data):
    """Build a DataFrame from parallel column/data sequences, with the
    columns kept in the order given by *cols*."""
    df = pd.DataFrame(index=index, data=dict(zip(cols, data)))
    # dict construction does not preserve column order; reselect explicitly
    return df[cols]


# read in template model
template_inp = "../brambleton/template.inp"
template_model = swmmio.Model(template_inp)

# copy template model
target_inp = "../brambleton/brambleton.inp"
copyfile(template_inp, target_inp)

# read in node and pipe input data, indexed and sorted by structure id
node_data_file = "../brambleton/spatial/nodes_attr.csv"
ndf = pd.read_csv(node_data_file)
ndf.set_index("Structure_", inplace=True)
ndf.sort_index(inplace=True)

pipe_data_file = "../brambleton/spatial/pipes_attr.csv"
pdf = pd.read_csv(pipe_data_file)

# get just the nodes in our area
# todo: needs to take a list of outlet nodes
outlet_id = 'F15531'
# column in the pipe data naming each pipe's upstream structure
us_node_col_name = "Upstream_S"
from swmmio import swmmio
from get_contributing_area import get_contributing_area

# Compute the contributing drainage area at several nodes of the Hague model.
mymodel = swmmio.Model("../hague_model/v2014_Hague_EX_10yr_MHHW_mod2.inp")
nodes = mymodel.nodes()
cons = mymodel.conduits()
subs = mymodel.subcatchments()

a0 = get_contributing_area("St1", cons, subs)
a1 = get_contributing_area("E143351", cons, subs)
a2 = get_contributing_area("E144050", cons, subs)
a3 = get_contributing_area("E146004", cons, subs)
a4 = get_contributing_area("F134101", cons, subs)

# print() call form is valid in both Python 2 and 3
# (was the Python-2-only statement `print a4`)
print(a4)