Example #1
0
def test_merge_models():
    """Merging model A into B and B into A should both equal the simple
    concatenation of the two models' conduits."""
    from swmmio import Model
    import pandas as pd

    with tempfile.TemporaryDirectory() as tempdir:
        # target paths for the two merge orders
        path_ab = os.path.join(tempdir, 'merged-model-test-ab.inp')
        path_ba = os.path.join(tempdir, 'merged-model-test-ba.inp')

        model_ab = merge_models(MODEL_FULL_FEATURES_XY,
                                MODEL_FULL_FEATURES_XY_B,
                                path_ab)
        model_ba = merge_models(MODEL_FULL_FEATURES_XY_B,
                                MODEL_FULL_FEATURES_XY,
                                path_ba)

        model_a = Model(MODEL_FULL_FEATURES_XY)
        model_b = Model(MODEL_FULL_FEATURES_XY_B)

        # expected conduits: concatenation of both source models' conduits
        expected = pd.concat([model_a.inp.conduits,
                              model_b.inp.conduits]).sort_index()

        merged_ab = Model(model_ab).inp.conduits.sort_index()
        merged_ba = Model(model_ba).inp.conduits.sort_index()

        # merge order should not matter
        assert expected.equals(merged_ab)
        assert expected.equals(merged_ba)
Example #2
0
def test_modify_model():
    """Adding sea-level rise to the OUTFALLS section and rewriting the inp
    should produce the expected invert elevations when re-read."""
    from swmmio.utils.modify_model import replace_inp_section
    from swmmio import Model
    import pandas as pd

    # baseline model and the expected (pre-modified) outfalls fixture
    baseline = Model(MODEL_FULL_FEATURES_XY)
    expected_outfalls = pd.read_csv(OUTFALLS_MODIFIED, index_col=0)
    rise = 10.0  # starting sea level rise condition

    # pull the model's outfalls into a dataframe and apply the rise
    outfalls = dataframe_from_inp(baseline.inp.path, '[OUTFALLS]')
    outfalls['OutfallType'] = 'FIXED'
    outfalls.loc[:, 'InvertElev'] = (
        pd.to_numeric(outfalls.loc[:, 'InvertElev']) + rise)
    expected_outfalls.loc[:, 'InvertElev'] = pd.to_numeric(
        expected_outfalls.loc[:, 'InvertElev'])

    with tempfile.TemporaryDirectory() as tempdir:
        # copy the base model into a fresh per-rise directory
        scenario_dir = os.path.join(tempdir, str(rise))
        makedirs(scenario_dir)
        scenario_inp = os.path.join(
            scenario_dir, baseline.inp.name + "_" + str(rise) + '_SLR.inp')
        shutil.copyfile(baseline.inp.path, scenario_inp)

        # overwrite the OUTFALLS section of the new model with adjusted data
        replace_inp_section(scenario_inp, '[OUTFALLS]', outfalls)

        # re-read the modified model and compare against the fixture
        modified = Model(scenario_inp)
        modified_outfalls = modified.inp.outfalls
        assert (modified_outfalls.loc['J4', 'InvertElev'].round(1) ==
                expected_outfalls.loc['J4', 'InvertElev'].round(1))
def test_model_section(test_model):
    """Smoke-test ModelSection construction and the junctions accessor."""
    junction_section = ModelSection(test_model, 'junctions')
    print(junction_section)

    model = Model(MODEL_FULL_FEATURES__NET_PATH)
    junctions = model.inp.junctions
    print(junctions)
Example #4
0
def run_swmm_engine(inp_folder):
    """Run the SWMM hot-start sequence for the model in *inp_folder*,
    skipping models whose .rpt already holds current, usable data.
    Start/finish times are appended to a shared log file.

    NOTE(review): relies on module-level ``wd`` and ``log_start_time``,
    which are assumed to be defined elsewhere in this module — confirm.
    """
    logfile = os.path.join(wd, 'log_' + log_start_time + '.txt')
    model = Model(inp_folder)

    if model.rpt_is_valid():
        # rpt already has current, usable data: nothing to run
        with open(logfile, 'a') as f:
            f.write('{}: skipped (up-to-date)\n'.format(model.inp.name))
        return

    # rpt is stale or missing — run the model and log timing
    with open(logfile, 'a') as f:
        stamp = datetime.now().strftime("%y-%m-%d %H:%M")
        f.write('{}: started at {} '.format(model.inp.name, stamp))
        run.run_hot_start_sequence(model.inp.path)
        stamp = datetime.now().strftime("%y-%m-%d %H:%M")
        f.write(', completed at {}\n'.format(stamp))
Example #5
0
def batch_cost_estimates(baseline_dir,
                         segments_dir,
                         options_dir,
                         results_file,
                         supplemental_cost_data=None,
                         create_proj_reports=True):
    """
    DEPRECATED

    Compute the cost estimate of each model/option in the segments and
    combinations directories. Results will be appended to the results text
    file.

    :param baseline_dir: directory containing the baseline SWMM5 model
    :param segments_dir: directory tree containing segment models
    :param options_dir: directory tree containing combination models
    :param results_file: text file to which '<model_id>, <cost>' lines
        are appended
    :param supplemental_cost_data: extra cost data passed through to
        functions.estimate_cost_of_new_conduits (optional)
    :param create_proj_reports: when True, write a per-segment costing csv
        beside each model
    """
    # combine the segments and options (combinations) into one iterable
    paths = (segments_dir, options_dir)
    baseline = Model(baseline_dir)

    for path, dirs, files in chain.from_iterable(
            os.walk(path) for path in paths):

        for f in files:
            # endswith is stricter than the previous `'.inp' in f` test,
            # which also matched names like 'model.inp.bak'
            if not f.endswith('.inp'):
                continue
            inp_path = os.path.join(path, f)
            alt = Model(inp_path)

            # calculate the cost, reported in $ millions
            costsdf = functions.estimate_cost_of_new_conduits(
                baseline, alt, supplemental_cost_data)
            cost_estimate = costsdf.TotalCostEstimate.sum() / math.pow(10, 6)
            print('{}: ${}M'.format(alt.name, round(cost_estimate, 1)))

            # append one summary line per model to the results file
            model_id = os.path.splitext(f)[0]
            with open(results_file, 'a') as res:
                res.write('{}, {}\n'.format(model_id, cost_estimate))

            if create_proj_reports:
                # create an option-specific per segment costing csv file
                report_dir = os.path.join(alt.inp.dir, REPORT_DIR_NAME)
                fname = '{}_CostEstimate_{}.csv'.format(
                    alt.name, strftime("%y%m%d"))
                cost_report_path = os.path.join(report_dir, fname)
                # exist_ok avoids the check-then-create race of the old
                # `if not exists: mkdir` pattern
                os.makedirs(report_dir, exist_ok=True)
                costsdf.to_csv(cost_report_path)
Example #6
0
def batch_post_process(options_dir,
                       baseline_dir,
                       log_dir,
                       bbox=None,
                       overwrite=False):
    """
    DEPRECATED

    Batch process all models in a given directory, where child directories
    each hold a model (with .inp and .rpt companions). A bbox should be
    passed to control where the graphics are focused. Specify whether
    reporting content should be overwritten if found.
    """
    baseline = Model(baseline_dir)
    logfile = os.path.join(log_dir, 'logfile.txt')

    # write the summary-log header
    with open(logfile, 'a') as f:
        f.write('MODEL,NEW_SEWER_MILES,IMPROVED,ELIMINATED,WORSE,NEW\n')

    for folder in os.listdir(options_dir):
        # skip models that already have a Report directory, unless asked
        # to overwrite
        current_dir = os.path.join(options_dir, folder)
        report_dir = os.path.join(current_dir, REPORT_DIR_NAME)
        if not overwrite and os.path.exists(report_dir):
            print('skipping {}'.format(folder))
            continue

        # generate and write the comparison report
        current_model = Model(current_dir)
        print('Generating report for {}'.format(current_model.inp.name))
        report = reporting.Report(baseline, current_model)
        report.write(report_dir)

        # append a one-line summary for this model
        # columns: MODEL,NEW_SEWER_MILES,IMPROVED,ELIMINATED,WORSE,NEW
        with open(logfile, 'a') as f:
            f.write('{},{},{},{},{},{}\n'.format(
                current_model.inp.name,
                report.sewer_miles_new,
                report.parcels_flooding_improved,
                report.parcels_eliminated_flooding,
                report.parcels_worse_flooding,
                report.parcels_new_flooding))
Example #7
0
def propagate_changes_from_baseline(baseline_dir, alternatives_dir, combi_dir,
                                    version_id='', comments=''):

    """
    Rebuild all child models against a revised baseline.

    If the baseline model has changes that need to be propagated to all
    models, iterate through each model and rebuild the INPs with the new
    baseline and existing build instructions. Update the build
    instructions to reflect the revision date of the baseline.
    """
    # make the version id unique by appending a timestamp
    version_id += '_' + datetime.now().strftime("%y%m%d%H%M%S")

    # collect the directories of all models: each implementation level of
    # each alternative, plus every combination model
    model_dirs = [
        os.path.join(alternatives_dir, alt, imp_level)
        for alt in os.listdir(alternatives_dir)
        for imp_level in os.listdir(os.path.join(alternatives_dir, alt))
    ]
    model_dirs += [os.path.join(combi_dir, x) for x in os.listdir(combi_dir)]

    baseline = Model(baseline_dir)
    baseinp = baseline.inp.path

    for model_dir in model_dirs:
        model = Model(model_dir)
        vc_directory = os.path.join(model_dir, 'vc')
        latest_bi = vc_utils.newest_file(vc_directory)

        # update the build instructions metadata and rebuild the inp
        bi = inp.BuildInstructions(latest_bi)
        bi.metadata['Parent Models']['Baseline'] = {
            baseinp: vc_utils.modification_date(baseinp)}
        bi.metadata['Log'].update({version_id: comments})
        bi.save(vc_directory, version_id + '.txt')
        print('rebuilding {} with changes to baseline'.format(model.name))
        bi.build(baseline_dir, model.inp.path)  # overwrite the old inp
Example #8
0
def run_hot_start_sequence(inp_path, swmm_eng=SWMM_ENGINE_PATH):
    """Run the SWMM engine three times to warm up the model via hotstart
    files: pass 1 saves hotstart1 (no rain, no reporting), pass 2 uses
    hotstart1 and saves hotstart2, pass 3 uses hotstart2 with normal
    options."""
    model = Model(inp_path)
    rpt_path = os.path.splitext(inp_path)[0] + '.rpt'
    hotstart1 = os.path.join(model.inp.dir, model.inp.name + '_hot1.hsf')
    hotstart2 = os.path.join(model.inp.dir, model.inp.name + '_hot2.hsf')

    def files_df(commands):
        # one-column [FILES] dataframe of SAVE/USE HOTSTART directives
        return pd.DataFrame(pd.Series(commands), columns=['[FILES]'])

    # pass 1: save hotstart1, suppress reporting, run without rain
    print('create new model inp with params to save hotstart1')
    model = replace_inp_section(
        model.inp.path, '[FILES]',
        files_df(['SAVE HOTSTART "{}"'.format(hotstart1)]))
    model = replace_inp_section(model.inp.path, '[REPORT]', defs.REPORT_none)
    model = replace_inp_section(model.inp.path, '[OPTIONS]',
                                defs.OPTIONS_no_rain)
    subprocess.call([swmm_eng, model.inp.path, rpt_path])

    # pass 2: warm-start from hotstart1 and save hotstart2
    print('with params to use hotstart1 and save hotstart2')
    model = replace_inp_section(
        model.inp.path, '[FILES]',
        files_df(['USE HOTSTART "{}"'.format(hotstart1),
                  'SAVE HOTSTART "{}"'.format(hotstart2)]))
    subprocess.call([swmm_eng, model.inp.path, rpt_path])

    # pass 3: warm-start from hotstart2, restore normal options
    print('params to use hotstart2 and not save anything')
    model = replace_inp_section(
        model.inp.path, '[FILES]',
        files_df(['USE HOTSTART "{}"'.format(hotstart2)]))
    model = replace_inp_section(model.inp.path, '[REPORT]',
                                defs.REPORT_none)  # defs.REPORT_nodes_links)
    model = replace_inp_section(model.inp.path, '[OPTIONS]',
                                defs.OPTIONS_normal)
    subprocess.call([swmm_eng, model.inp.path, rpt_path])
def test_model():
    # factory returning a Model built from the full-featured .net test file
    # NOTE(review): used as a fixture argument by test_model_section —
    # presumably decorated with @pytest.fixture elsewhere; confirm
    return Model(MODEL_FULL_FEATURES__NET_PATH)
def test_model_01():
    # factory returning a Model built from the full-featured XY test inp
    # NOTE(review): presumably a pytest fixture like test_model — confirm
    return Model(MODEL_FULL_FEATURES_XY)
Example #11
0
def batch_reports(project_dir,
                  results_file,
                  additional_costs=None,
                  join_data=None,
                  report_dirname='Report_AllParcels'):
    """
    Generate flood/comparison reports for every model under a project dir.

    :param project_dir: root directory containing Segments, Combinations,
        CommonData, ProjectAdmin and Baseline subdirectories
    :param results_file: text file to which one summary row per model is
        appended
    :param additional_costs: passed through to reporting.ComparisonReport
    :param join_data: passed through to reporting.ComparisonReport
    :param report_dirname: name of the per-model report directory created
        beside each inp
    """
    # combine the segments and options (combinations) into one iterable
    SEGMENTS_DIR = os.path.join(project_dir, 'Segments')
    COMBOS_DIR = os.path.join(project_dir, 'Combinations')
    COMMON_DATA_DIR = os.path.join(project_dir, 'CommonData')
    ADMIN_DIR = os.path.join(project_dir, 'ProjectAdmin')
    BASELINE_DIR = os.path.join(project_dir, 'Baseline')

    # instantiate the true baseline flood report
    baseline_model = Model(BASELINE_DIR)
    pn_join_csv = os.path.join(COMMON_DATA_DIR,
                               r'sphila_sheds_parcels_join.csv')
    parcel_node_join_df = pd.read_csv(pn_join_csv)
    parcel_shp_df = spatial.read_shapefile(sg.config.parcels_shapefile)
    baserpt = reporting.FloodReport(baseline_model, parcel_node_join_df)
    base_flood_vol = baserpt.flood_vol_mg

    paths = (SEGMENTS_DIR, COMBOS_DIR)
    # result file header
    cols = 'MODEL,COST,FLOOD_VOL_MG,PARCEL_FLOOD_HRS,FLOOD_VOL_REDUCED_MG,PARCEL_FLOOD_HRS_REDUCED,PARCEL_HRS_REDUCED_DELTA_THRESH'
    with open(results_file, 'a') as res:
        res.write(cols + '\n')

    for path, dirs, files in chain.from_iterable(
            os.walk(path) for path in paths):

        for f in files:
            # endswith is stricter than `'.inp' in f`, which also matched
            # names like 'model.inp.bak'
            if not f.endswith('.inp'):
                continue
            inp_path = os.path.join(path, f)
            alt = Model(inp_path)
            print('reporting on {}'.format(alt.name))

            # generate the reports
            frpt = reporting.FloodReport(alt, parcel_node_join_df)
            impact_rpt = reporting.ComparisonReport(
                baserpt, frpt, additional_costs, join_data)

            # write to the log; the handle is named `res` (not `f`) so it
            # no longer shadows the filename loop variable
            model_id = os.path.splitext(f)[0]
            with open(results_file, 'a') as res:
                stats = (
                    model_id,
                    impact_rpt.cost_estimate,
                    frpt.flood_vol_mg,
                    frpt.parcel_hrs_flooded,
                    baserpt.flood_vol_mg - frpt.flood_vol_mg,
                    baserpt.parcel_hrs_flooded - frpt.parcel_hrs_flooded,
                    impact_rpt.parcel_hours_reduced,
                )
                res.write('{},{},{},{},{},{},{}\n'.format(*stats))

            # exist_ok avoids the check-then-create race of the old
            # `if not exists: mkdir` pattern
            report_dir = os.path.join(alt.inp.dir, report_dirname)
            os.makedirs(report_dir, exist_ok=True)

            # write the report files
            impact_rpt.write(report_dir)
            impact_rpt.generate_figures(report_dir, parcel_shp_df)
            serialize.encode_report(impact_rpt,
                                    os.path.join(report_dir, 'rpt.json'))
Example #12
0
def create_combinations(baseline_dir, rsn_dir, combi_dir, version_id='',
                        comments=''):

    """
    Generate SWMM5 models of each logical combination of all implementation
    phases (IP) across all relief sewer networks (RSN).

    Inputs:
        baseline_dir -> path to directory containing the baseline SWMM5 model
        rsn_dir ->      path to directory containing subdirectories for each RSN
                        containing directories for each IP within the network
        combi_dir ->    target directory in which child models will be created
        version_id ->   identifier for a given version (optional)
        comments ->     comments tracked within build instructions log for
                        each model scenario (optional)

    Calling create_combinations will update child models if parent models have
    been changed.

    """

    baseinp = Model(baseline_dir).inp.path
    # make the version id unique by appending a timestamp
    version_id += '_' + datetime.now().strftime("%y%m%d%H%M%S")

    #create a list of directories pointing to each IP in each RSN
    RSN_dirs = [os.path.join(rsn_dir, rsn) for rsn in os.listdir(rsn_dir)]
    IP_dirs = [os.path.join(d, ip) for d in RSN_dirs for ip in os.listdir(d)]

    #list of lists of each IP within each RSN, including a 'None' phase
    #(None represents "no phase built" for that network)
    IPs = [[None] + os.listdir(d) for d in RSN_dirs]

    #identify all scenarios (cartesian product of sets of IPs between each RSN)
    #then isolate child scenarios with at least 2 parents (sets with one parent
    #are already modeled as IPs within the RSNs); the inner filter drops the
    #None placeholders
    all_scenarios = [[_f for _f in s if _f] for s in itertools.product(*IPs)]
    child_scenarios = [s for s in all_scenarios if len(s) > 1]

    #notify user of what was initially found
    str_IPs = '\n'.join([', '.join([_f for _f in i if _f]) for i in IPs])
    print(('Found {} implementation phases among {} networks:\n{}\n'
           'This yeilds {} combined scenarios ({} total)'.format(len(IP_dirs),
            len(RSN_dirs),str_IPs,len(child_scenarios),len(all_scenarios) - 1)))

    # ==========================================================================
    # UPDATE/CREATE THE PARENT MODEL BUILD INSTRUCTIONS
    # ==========================================================================
    for ip_dir in IP_dirs:
        ip_model = Model(ip_dir)
        vc_dir = os.path.join(ip_dir, 'vc')

        if not os.path.exists(vc_dir):
            # no version-control dir yet: first run for this IP
            print('creating new build instructions for {}'.format(ip_model.name))
            inp.create_inp_build_instructions(baseinp, ip_model.inp.path,
                                              vc_dir,
                                              version_id, comments)
        else:
            #check if the alternative model was changed since last run of this tool
            #--> compare the modification date to the BI's modification date meta data
            latest_bi = vc_utils.newest_file(vc_dir)
            if not vc_utils.bi_is_current(latest_bi):
                #revision date of the alt doesn't match the newest build
                #instructions for this 'imp_level', so we should refresh it
                print('updating build instructions for {}'.format(ip_model.name))
                inp.create_inp_build_instructions(baseinp, ip_model.inp.path,
                                                  vc_dir, version_id,
                                                  comments)

    # ==========================================================================
    # UPDATE/CREATE THE CHILD MODELS AND CHILD BUILD INSTRUCTIONS
    # ==========================================================================
    for scen in child_scenarios:
        # child model id: sorted parent ids joined with underscores
        newcombi = '_'.join(sorted(scen))
        new_dir = os.path.join(combi_dir, newcombi)
        vc_dir = os.path.join(combi_dir, newcombi, 'vc')

        #parent model build instr files
        #BUG (this breaks with model IDs with more than 1 char) — f[0]
        #assumes the RSN directory name is the first character of the IP id
        parent_vc_dirs = [os.path.join(rsn_dir, f[0], f, 'vc') for f in scen]
        latest_parent_bis = [vc_utils.newest_file(d) for d in parent_vc_dirs]
        build_instrcts = [inp.BuildInstructions(bi) for bi in latest_parent_bis]

        if not os.path.exists(new_dir):

            os.mkdir(new_dir)
            newinppath = os.path.join(new_dir, newcombi + '.inp')

            # sum() combines parent BuildInstructions (relies on their
            # __add__ / __radd__ implementation)
            print('creating new child model: {}'.format(newcombi))
            new_build_instructions = sum(build_instrcts)
            new_build_instructions.save(vc_dir, version_id+'.txt')
            new_build_instructions.build(baseline_dir, newinppath)

        else:
            #check if the alternative model was changed since last run
            #of this tool --> compare the modification date to the BI's
            #modification date meta data
            latest_bi = vc_utils.newest_file(os.path.join(new_dir,'vc'))
            if not vc_utils.bi_is_current(latest_bi):
                #revision date of the alt doesn't match the newest build
                #instructions for this 'imp_level', so we should refresh it
                print('updating child build instructions for {}'.format(newcombi))
                newinppath = os.path.join(new_dir, newcombi + '.inp')
                new_build_instructions = sum(build_instrcts)
                new_build_instructions.save(vc_dir, version_id+'.txt')
                new_build_instructions.build(baseline_dir, newinppath)
Example #13
0
from swmmio import Model
from swmmio.tests.data import MODEL_A_PATH
from swmmio.tests.data import MODEL_FULL_FEATURES_XY, MODEL_FULL_FEATURES__NET_PATH, MODEL_FULL_FEATURES_XY_B

# example models instantiated at import time from the bundled test data
philly = Model(MODEL_A_PATH, crs="+init=EPSG:2817")  # model with an explicit CRS
jersey = Model(MODEL_FULL_FEATURES_XY)  # full-featured XY model
jerzey = Model(MODEL_FULL_FEATURES_XY_B)  # 'B' variant of the jersey model
spruce = Model(MODEL_FULL_FEATURES__NET_PATH)  # built from the .net file