def delay_job(i):
    loginfo("Simmulation " + str(i) + " of " + str(nsims))
    # create path to the 1-time-use dll file we are going to create
    dll_i = dll_bn[:dll_bn.rindex(".")] + '-' + str(i) + dll_bn[dll_bn.rindex("."):]
    lib_path = os.path.join(dll_dir, dll_i)
    # create 1-time-use dll file copy
    shutil.copyfile(dll_path, lib_path)
    # build the simulation directory path by appending the folder id to the directory prefix
    sim_dir = inp_dir_prefix + str(i)
    # specify the actual file pyswmm needs
    sim_path = os.path.join(sim_dir, r'NPlesantCreek.inp')
    #print("Simulation input file found:", sim_path)
    # specify the file that pyswmm will (over)write with output after running the probabilistic simulation
    sim_bin_path = os.path.join(sim_dir, "NPlesantCreek.out")
    # delete pre-existing .out, if present, in order to run swmm agreeably
    if os.path.exists(sim_bin_path):
        loginfo("Deleting current copy of <" + sim_bin_path + "> so new copy can be created.")
        #print("Deleting current copy of <NPlesantCreek.out> so new copy can be created.")
        os.remove(sim_bin_path)
    # stagger starting times 1 sec apart
    time.sleep(i)
    # load the model {no interaction, write (binary) results to <NPlesantCreek.out>, use the specified dll}
    sim = Simulation(inputfile=sim_path, reportfile=None, outputfile=sim_bin_path, swmm_lib_path=lib_path)
    # simulate the loaded model
    loginfo("Executing SWMM simmulation with no interaction. Input from <" + sim_path + ">. Will store output in <" + sim_bin_path + ">.")
    # sim.execute()
    with sim as s:
        for step in s:
            pass

    # extract swmm outputs with swmmtoolbox and delete expensive binary files
    lab1 = 'subcatchment,,Runoff_rate'
    lab2 = 'subcatchment,,Bifenthrin'
    runf_stack = swmmtoolbox.extract(sim_bin_path, lab1)
    bif_stack = swmmtoolbox.extract(sim_bin_path, lab2)

    loginfo("Deleting <" + sim_bin_path + "> to free up memory.")
    os.remove(sim_bin_path)
    loginfo("Deleting <" + dll_i + "> to free up memory.")
    os.system("rm " + lib_path)
    print("Deleted <input_" + str(i) + "/JS_NPlesantCreek.out> and <" + dll_i + "> to free up memory.")    

    # compute and export daily averages to csv files and finish
    runf_davg = runf_stack.resample('D').mean()
    bif_davg = bif_stack.resample('D').mean()
    print(save_and_finish(runf_davg, os.path.join(sim_dir, "swmm_output_davg_runf.csv")))
    print(save_and_finish(bif_davg, os.path.join(sim_dir, "swmm_output_davg_bif.csv")))
    # the two return messages are identical, but both calls are needed to write both csv files
    
    # a message to indicate success
    return("file " + str(i) + " simulated!")
Example No. 2
    def time_series_vars_to_db(parte):
        nonlocal count
        series_df = swmm.extract(model_out, tipo + ',' + parte + ',')
        series_df.columns = [
            col[len(tipo + '_' + parte + '_'):].lower()
            for col in series_df.columns
        ]
        series_df.reset_index(inplace=True)
        series_df = series_df.rename({'index': 'elapsed_time'}, axis=1)
        series_df[tipo + '_id'] = parte
        series_df['event_id'] = evento
        series_df = series_df[cols_tipo]

        series.get(tipo).update({parte: series_df})
        print(tipo, parte)

        tabla = 'events_' + tipo + 's'

        # session_factory = sessionmaker(bind=engine_base_ina)
        # Session = scoped_session(session_factory)
        engine_base_ina.dispose()
        series.get(tipo).get(parte).to_sql(tabla,
                                           conn,
                                           index=False,
                                           if_exists='append')

        # Session.remove()

        count += 1
        print(tipo + ': ' + str(count) + ' of ' + str(len(partes)))

        return series
Example No. 3
def outflux_1(client, outfile, control_locations):
    # Pick a point from the points of interest
    for tag in control_locations.to_dict(orient='records'):
        # Generate the series from the output file
        series = sb.extract(
            outfile,
            sm.make_extract_string(tag["name"], tag["type"], tag["measure"]))
        # make time to unix
        time = convert_timestamps(series)
        # make the line
        data = generate_data(time, series, tag)
        # write to influx!
        data = '\n'.join(data)
        u = "username"
        p = "password"
        port = 8086
        db = "db"
        host = "your_url"
        post_header = {
            'Host': '%s:%s' % (host, port),
            'Connection': 'close',
            'Content-Length': '%d' % len(data),
            'Content-Type': 'text/plain'
        }

        r = requests.post('http://%s:%s/write?db=%s&u=%s&p=%s' %
                          (host, port, db, u, p),
                          headers=post_header,
                          data=data)
        print(r.status_code)
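
The helpers convert_timestamps and generate_data are not shown in this example. A minimal hypothetical version is sketched below, assuming the goal is to build InfluxDB line-protocol rows (measurement, tags, one field, nanosecond timestamp); the field and tag layout here is an illustration, not the project's actual format.

# Hypothetical helpers; names and line-protocol layout are assumptions.
def convert_timestamps(series):
    # pandas DatetimeIndex -> nanosecond unix timestamps
    return series.index.astype('int64')

def generate_data(time, series, tag):
    col = series.columns[0]
    return [
        '%s,name=%s,type=%s %s=%s %d' %
        (tag['measure'], tag['name'], tag['type'], col, value, ts)
        for ts, value in zip(time, series[col])
    ]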
Example No. 4
def count_CSO_events(self, inp, CSO_ids, model_outfile):
    # create the dataframe with the CSO time series for the specified nodes
    max_event_length = 1  # day
    CSO_event_seperation = 12  # hours

    if inp.CSO_type == 'Outflow from CSO structure':
        CSO_type = 'Total_inflow'
    elif inp.CSO_type == 'Flooding above ground from node':
        CSO_type = 'Flow_lost_flooding'

    df = pd.concat(
        ((swmmtoolbox.extract(model_outfile, ['node', CSO_ids[i], CSO_type]))
         for i in range(len(CSO_ids))),
        axis=1)
    # df is in CMS (m3/s), so values are converted to m3 per report timestep.
    df = df * 60 * inp.report_times_steps

    # Set all timesteps with flooding to 1
    CSO_YesNo_df = df.mask(df > 0, other=1)
    # Time between events is assumed to be 12 hours before they are counted as separate
    time_between_events = CSO_event_seperation * 60 / inp.report_times_steps

    # Get df with 1 at every event start
    CSO_start_df = CSO_YesNo_df.where(np.logical_and(
        CSO_YesNo_df > 0,
        CSO_YesNo_df.rolling(int(time_between_events),
                             min_periods=0).sum() == 1),
                                      other=0)
    # Get df with 1 at every event stop
    CSO_stop_df = CSO_YesNo_df.where(np.logical_and(
        CSO_YesNo_df > 0,
        CSO_YesNo_df.rolling(
            int(time_between_events),
            min_periods=0).sum().shift(-(int(time_between_events) -
                                         1)).fillna(0) == 1),
                                     other=0)

    # Counter for each CSO structure
    CSO_event_counter = dict(zip(CSO_ids, np.zeros(len(CSO_ids))))
    for count, col in enumerate(CSO_start_df.columns):
        max_dur = timedelta(max_event_length).days
        start_index = np.where(CSO_start_df[col] == 1)[0]
        stop_index = np.where(CSO_stop_df[col] == 1)[0]
        # The end is added to the stop index if overflow continues until the end of simulation
        if len(start_index) != len(stop_index):
            stop_index = np.append(stop_index, len(CSO_start_df) - 1)
        start_time = CSO_start_df.iloc[start_index].index
        stop_time = CSO_stop_df.iloc[stop_index].index
        duration = stop_time - start_time
        # If events are longer than maximum duration then the overflow is counted once for each day it lasts.
        for i, dur in enumerate(duration):
            if dur.days > max_dur:
                CSO_event_counter[CSO_ids[count]] += dur.days
            else:
                CSO_event_counter[CSO_ids[count]] += 1
    # CSO_event_counter contains information for each CSO structure
    # total_CSO_events contains the total number of CSOs
    total_CSO_events = sum(CSO_event_counter.values())
    return total_CSO_events
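
The rolling-sum trick above flags a timestep as an event start only when it has overflow and is the first overflow timestep within the separation window. A toy illustration (not from the original code) on a short series with a 3-timestep window:

# Toy illustration of the event-start detection used above.
import numpy as np
import pandas as pd

flow = pd.Series([0, 0, 2, 3, 0, 0, 0, 0, 1, 0])  # overflow volume per timestep
yes_no = flow.mask(flow > 0, other=1)              # 1 wherever there is overflow
window = 3                                         # separation window in timesteps
starts = yes_no.where(
    np.logical_and(yes_no > 0,
                   yes_no.rolling(window, min_periods=0).sum() == 1),
    other=0)
print(starts.tolist())  # 1 only at positions 2 and 8: two separate events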
Example No. 5
def time_series_vars_to_db(model_out, tipo, evento, conn, sub_set, cols_tipo):
    """ Esta función genera un diccionario con todas las series temporales del modelo para una tipo.
        El diccionario tiene la siguiente estructura:
            {tipo :
                {parte:
                    {variable: serie}
                }
            }
        Argumentos:
            model.out: el archivo .out que devuelve swmm
            tipo: str --> The type are "subcatchment", "node", "link", "pollutant", "system".
            evento: ID del evento
        Return:
            dicc
            # hace un insert en la base de datos
    """
    series = {tipo: {}}
    partes = set(
        [item[1] for item in swmm.catalog(model_out) if item[0] == tipo])

    if len(sub_set) == 0:
        partes = [parte for parte in partes]
    else:
        partes = [parte for parte in partes if parte in sub_set]
    print('Number of parts:', len(partes), partes)
    count = 0

    for parte in partes:
        series_df = swmm.extract(model_out, tipo + ',' + parte + ',')
        series_df.columns = [
            col[len(tipo + '_' + parte + '_'):].lower()
            for col in series_df.columns
        ]
        series_df.reset_index(inplace=True)
        series_df = series_df.rename({'index': 'elapsed_time'}, axis=1)
        series_df[tipo + '_id'] = parte
        series_df['event_id'] = evento
        series_df = series_df[cols_tipo]

        series.get(tipo).update({parte: series_df})
        print(tipo, parte)

        tabla = 'events_' + tipo + 's'

        # session_factory = sessionmaker(bind=engine_base_ina)
        # Session = scoped_session(session_factory)
        engine_base_ina.dispose()
        series.get(tipo).get(parte).to_sql(tabla,
                                           conn,
                                           index=False,
                                           if_exists='append')

        # Session.remove()

        count += 1
        print(tipo + ': ' + str(count) + ' of ' + str(len(partes)))

    return series
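
A hedged usage sketch of the function above. The engine, connection string, and column list are illustrative assumptions (the original project's database layout is not shown); `swmm` inside the function is the swmmtoolbox module, and `engine_base_ina` is the module-level engine the function disposes of.

# Illustrative call; connection string and column names are assumptions.
from sqlalchemy import create_engine
from swmmtoolbox import swmmtoolbox as swmm

engine_base_ina = create_engine('postgresql://user:password@localhost/swmm_db')
with engine_base_ina.connect() as conn:
    series = time_series_vars_to_db(
        model_out='model.out',
        tipo='node',
        evento=42,
        conn=conn,
        sub_set=[],   # empty -> keep every node found in the .out file
        cols_tipo=['elapsed_time', 'depth_above_invert', 'node_id', 'event_id'])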
Example No. 6
def outflux(client, outfile, control_locations):
    # Pick a point from the points of interest
    for tag in control_locations.to_dict(orient='records'):
        # Generate the series from the output file
        series = sb.extract(
            outfile,
            sm.make_extract_string(tag["name"], tag["type"], tag["measure"]))
        # make time to unix
        time = convert_timestamps(series)
        # make the line
        data = generate_data(time, series, tag)
        # write to influx!
        to_influx(client, data)
Example No. 7
def count_CSO_volume(self, inp, CSO_ids, model_outfile):
    if inp.CSO_type == 'Outflow from CSO structure':
        CSO_type = 'Total_inflow'
    elif inp.CSO_type == 'Flooding above ground from node':
        CSO_type = 'Flow_lost_flooding'
    df = pd.concat(
        ((swmmtoolbox.extract(model_outfile, ['node', CSO_ids[i], CSO_type]))
         for i in range(len(CSO_ids))),
        axis=1)
    df = df * inp.report_times_steps * 60
    # CSO_volume_events = df.sum().sum()
    CSO_volume_total = df.sum().sum()

    return CSO_volume_total
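
As a worked illustration of the conversion above (the 5-minute report step is an assumption): SWMM reports flows in CMS (m3/s), so a value of 0.5 m3/s held for one 5-minute report step corresponds to 0.5 * 60 * 5 = 150 m3 in df, and summing over all timesteps and nodes gives the total spilled volume.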
Example No. 8
def basins_cross_correlation(outfile, basins, depths):
    """
    8/2-21
    Computes the correlation between depths in the different basins. 
    Can be used to check if filling degrees are similar or if some basins vary in filling. 
    
    Parameters
    ----------
    outfile : str
        Path to the SWMM .out file.
    basins : sequence of str
        Node IDs of the basins to compare.
    depths : sequence of float
        Basin depths used to normalise Depth_above_invert into filling degrees.

    Returns
    -------
    None.

    """

    # outfile = r'C:\Users\magnu\OneDrive\DTU\NOAH_project_local\github\NOAH\NOAH_RTC_Tool\output\2021-02-08_11-39-14\Astlingen_EFD.out'
    # basins = ('T1','T2','T3','T4','T5','T6')
    # depths = [5,5,5,5,5,5]

    frames = [
        swmmtoolbox.extract(outfile, ['node', basin, 'Depth_above_invert']) /
        depths[i] for i, basin in enumerate(basins)
    ]
    filling_degrees = pd.concat(frames, axis=1)
    filling_degrees.columns = basins

    # Compute correlations
    Corr_matrix = filling_degrees.corr()
    print('Correlation matrix is:\n', Corr_matrix)

    # Make a simple plot
    ticks = range(len(basins))
    labels = basins

    plt.figure()
    plt.matshow(Corr_matrix)
    plt.xticks(ticks, labels)
    # plt.yticks(ticks, labels)
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=14)
    # plt.title('Correlation Matrix', fontsize=10);
    plt.show()
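
A hedged usage sketch, reusing the values from the commented-out example above (the output file path is a placeholder):

# Illustrative call; the .out path is a placeholder.
basins = ('T1', 'T2', 'T3', 'T4', 'T5', 'T6')
depths = [5, 5, 5, 5, 5, 5]
basins_cross_correlation('Astlingen_EFD.out', basins, depths)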
Example No. 9
    def run(self, *params, named_model_params=None, plot_results=False):
        """
        Runs the SWMM model with specific parameters. The following parameters can be passed.
        :param named_model_params: dictionary of named model parameters. Replaces unnamed params
        :param multiple unnamed params: 1st: surface roughness
        :return: the simulation of the model
        """

        # if only unnamed params are given, change how parameters are stored in order to feed to swmm model
        model_params = {}
        if named_model_params is None:
            for key, value in self.cal_params.items():
                model_params[key] = params[value['rank']]
        else:
            model_params = named_model_params

        # Check model params
        if not self.check_parameters(model_params):
            return self.observations * -10000

        # Apply model params to model
        self.apply_parameters(model_params)

        # Run model
        with open(os.devnull, "w") as f:
            subprocess.call([
                self.swmm_executable, self.temp_model, self.report_file,
                self.output_file
            ],
                            stdout=f)

        # read simulation output
        data = swmmtoolbox.extract(
            self.output_file,
            *[','.join(x['swmm_node']) for x in self.obs_config])
        # rename index
        data.index.rename('datetime', inplace=True)

        # renaming data columns
        rename_dict = dict(('_'.join(o['swmm_node']), o['swmm_node'][1])
                           for o in self.obs_config)
        self.simulation = data.rename(index=str, columns=rename_dict)

        # plot results if necessary
        if plot_results:
            self.plot()

        return self.simulation
Example No. 10
def Compute_CSO_statistics(inp,
                           CSO_ids,
                           model_outfile,
                           allow_CSO_different_locations=True):
    """
    Takes an .out file from SWMM and computes statistics for the specified nodes. 
    This can be to compute CSO volumes or count events. 

    Parameters
    ----------
    inp : TYPE
        Input file with all parameters from the NOAH RTC Tool.
    CSO_ids : TYPE
        List of node IDs that are to be computed.
    model_outfile : TYPE
        The .out file from the SWMM simulation.
    allow_CSO_different_locations: Binary (True/False)
        Defines whether the CSOs are computed for each node, or combined so that only the different times count as different CSOs.
        This is used for one recipient only and disregards the spatial distribution of the nodes.
        True if CSOs from each node are computed individually.
        False if the CSOs are combined into one time series that is used for the events.
        
    Returns 
    -------
    DataFrame with statistics for each of the nodes
    """
    # create the dataframe with the CSO time series for the specified nodes
    max_event_length = 1  # day
    # Minimum time (CSO_event_seperation) between events before they are counted as separate
    CSO_event_seperation = 6  # hours

    if inp.CSO_type == 'Outflow from CSO structure':
        CSO_type = 'Total_inflow'
    elif inp.CSO_type == 'Flooding above ground from node':
        CSO_type = 'Flow_lost_flooding'

    df = pd.concat(
        ((swmmtoolbox.extract(model_outfile, ['node', CSO_ids[i], CSO_type]))
         for i in range(len(CSO_ids))),
        axis=1)
    # df is in CMS (m3/s), so values are converted to m3 per report timestep.
    df = df * 60 * inp.report_times_steps
    # pdb.set_trace()
    if allow_CSO_different_locations == False:
        df = pd.DataFrame(df.max(axis=1))
        CSO_ids = ['Sum']
    df.columns = CSO_ids
    # Set all timesteps with flooding to 1
    CSO_YesNo_df = df.mask(df > 0, other=1)
    # Time between events in timesteps
    time_between_events = CSO_event_seperation * 60 / inp.report_times_steps
    # time_between_events = timedelta(hours = CSO_event_seperation)
    # Get df with 1 at every event start
    CSO_start_df = CSO_YesNo_df.where(np.logical_and(
        CSO_YesNo_df > 0,
        CSO_YesNo_df.rolling(int(time_between_events),
                             min_periods=0).sum() == 1),
                                      other=0)
    # Get df with 1 at every event stop
    CSO_stop_df = CSO_YesNo_df.where(np.logical_and(
        CSO_YesNo_df > 0,
        CSO_YesNo_df.rolling(
            int(time_between_events),
            min_periods=0).sum().shift(-(int(time_between_events) -
                                         1)).fillna(0) == 1),
                                     other=0)

    empty_dict = {
        'Event_ID': [],
        'Node': [],
        'Start_time': [],
        'End_time': [],
        'Duration': [],
        'Volume': []
    }
    # pdb.set_trace()
    (start_index, node_ID) = np.where(CSO_start_df == 1)
    start_time = CSO_start_df.iloc[start_index].index
    # Each start time corresponds to one event.
    no_events = len(start_time)
    # computes statistics for each event
    df_stats = pd.DataFrame(empty_dict)
    for i in range(no_events):
        tmp_dict = empty_dict.copy()
        tmp_dict['Event_ID'] = i + 1
        tmp_dict['Node'] = CSO_ids[node_ID[i]]
        tmp_dict['Start_time'] = start_time[i]
        stop_index = np.where(
            np.logical_and(
                CSO_stop_df[CSO_ids[node_ID[i]]].index >= start_time[i],
                CSO_stop_df[CSO_ids[node_ID[i]]] == 1))
        # pdb.set_trace()
        tmp_dict['End_time'] = CSO_stop_df.index[stop_index[0]][0]
        tmp_dict['Duration'] = tmp_dict['End_time'] - tmp_dict['Start_time']
        tmp_dict['Volume'] = df[CSO_ids[node_ID[i]]].loc[
            tmp_dict['Start_time']:tmp_dict['End_time']].sum()
        df_stats = df_stats.append(tmp_dict, ignore_index=True)

    # Warn if any of the events are longer than the maximum allowed duration
    if (df_stats['Duration'] > timedelta(days=max_event_length)).any():
        print('Some events last longer than the maximum allowed duration.')

    # All events are ranked after volume. Biggest event has rank = 1
    df_stats['Rank'] = df_stats['Volume'].rank(ascending=False)

    # Events for each node are ranked after volume. Biggest event has rank = 1
    df_stats['Node Rank'] = df_stats.groupby('Node')['Volume'].rank(
        ascending=False)

    # Event_ID is used as index
    df_stats.set_index(df_stats['Event_ID'], inplace=True)
    return df_stats
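
A hedged usage sketch of Compute_CSO_statistics. The `inp` object comes from the NOAH RTC Tool configuration and the node IDs and output file name are placeholders:

# Illustrative call; `inp`, the node IDs, and the .out path are placeholders.
CSO_ids = ['node_12', 'node_27']
df_stats = Compute_CSO_statistics(inp, CSO_ids, 'model_run.out',
                                  allow_CSO_different_locations=True)
print(df_stats[['Node', 'Start_time', 'Duration', 'Volume', 'Rank']])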
Example No. 11
    def model(params1):

        # #### Set up input variables and simulation id

        # In[13]:

        if mode == "debug":
            # Fill in any of the 34 parameters not provided in params1 with the defaults from test_params
            for key, value in params1.items():
                params[key] = value
            params1 = params

        # Error prevention:
        # In real life, these situations would be impossible.
        # Since these parameters are simulated, they need to be watched out for and fixed.
        # If they are overlooked, the model will fail early during simulation.
        if params1['MaxRate'] < params1['MinRate']:
            params1['MaxRate'], params1['MinRate'] = params1[
                'MinRate'], params1['MaxRate']
        if params1['FC'] < params1['WP']:
            params1['FC'], params1['WP'] = params1['WP'], params1['FC']

        # partition parameters into the ones for swmm and the ones for vvwm
        swmm_keys = list(params1.keys())[:16]  # 3/2
        vvwm_keys = list(params1.keys())[16:]  # 3/2
        swmm_params = {key: params1[key] for key in swmm_keys}
        vvwm_params = {key: params1[key] for key in vvwm_keys}

        # In[14]:

        # spin up simulation id with this cool tool for generating random ids
        sid = uuid.uuid4().hex[0:8]
        # set up logging
        loginfo, logerror = log_prefixer(sid)

        # #### Make simulation-specific SWMM items

        # In[15]:

        # make paths to (soon-to-be) directory & files for this simulation using sid

        # directory
        sdir_path = os.path.join(temp_path, sid)
        # input file
        sinp_path = os.path.join(sdir_path, sid) + ".inp"
        # output file
        sout_path = os.path.join(sdir_path, sid) + ".out"
        # report file
        srpt_path = os.path.join(sdir_path, sid) + ".rpt"
        # dynamic link library file
        ## dll basename
        sdll_bn = dll_bn[:dll_bn.
                         rindex(".")] + '-' + sid + dll_bn[dll_bn.rindex("."):]
        ## dll full path
        sdll_path = os.path.join(sdir_path, sdll_bn)

        # In[16]:

        # make the directory for which we just constructed a path
        if not os.path.exists(sdir_path):
            os.mkdir(sdir_path)
            print("Folder ", sid, " created", "\n")
        else:
            print("Folder ", sid, "already exists")

        # make swmm input file:
        with open(inp_path, "r") as read_file, open(sinp_path,
                                                    "w") as write_file:
            filelines = read_file.readlines()

            # first we need to correct some absolute paths, because they are currently only set to work on the author's computer
            filelines = replace_infile_abspaths(filelines=filelines)

            # 113 = number of subcatchments
            #for c, par in enumerate(["NImperv", "NPerv", "SImperv", "SPerv", "PctZero"]):
            for c, par in enumerate(swmm_keys[0:5]):
                filelines[172:(172 + 113)] = editted_lines(
                    swmm_dict=swmm_params,
                    Num=113,
                    row_0=172,
                    parameter=par,
                    Col=c + 1,
                    flines=filelines)
            #for c, par in enumerate(["MaxRate", "MinRate", "Decay", "DryTime"]):
            for c, par in enumerate(swmm_keys[5:9]):
                filelines[289:(289 + 113)] = editted_lines(
                    swmm_dict=swmm_params,
                    Num=113,
                    row_0=289,
                    parameter=par,
                    Col=c + 1,
                    flines=filelines)

            # 1 = number of aquifers
            #for c, par in enumerate(["Por", "WP", "FC", "Ksat"]):
            for c, par in enumerate(swmm_keys[9:13]):
                filelines[406:(406 + 1)] = editted_lines(swmm_dict=swmm_params,
                                                         Num=1,
                                                         row_0=406,
                                                         parameter=par,
                                                         Col=c + 1,
                                                         flines=filelines)

            # 1 = number of pollutants
            filelines[1125:(1125 + 1)] = editted_lines(swmm_dict=swmm_params,
                                                       Num=1,
                                                       row_0=1125,
                                                       parameter="Kdecay",
                                                       Col=5,
                                                       flines=filelines)
            filelines[1371:(1371 + 1)] = editted_lines(swmm_dict=swmm_params,
                                                       Num=1,
                                                       row_0=1371,
                                                       parameter="BCoeff2",
                                                       Col=4,
                                                       flines=filelines)
            filelines[1377:(1377 + 1)] = editted_lines(swmm_dict=swmm_params,
                                                       Num=1,
                                                       row_0=1377,
                                                       parameter="WCoeff2",
                                                       Col=4,
                                                       flines=filelines)

            # write the changes to the file
            write_file.writelines(filelines)

        # copy a dll file into sdll_path
        shutil.copyfile(dll_path, sdll_path)

        # #### Execute simulation
        # In test mode, should take about 2-3 minutes.
        # In run mode, should take a while.

        # In[17]:

        # Error prevention
        # delete pre-existing .out, if present, in order to run swmm agreeably
        if os.path.exists(sout_path):
            loginfo("Deleting current copy of <" + sout_path +
                    "> so new copy can be created.")
            #print("Deleting current copy of <NPlesantCreek.out> so new copy can be created.")
            os.remove(sout_path)

        # load the model {no interaction, write (binary) results to sout_path, use the specified dll}
        sim = Simulation(inputfile=sinp_path,
                         reportfile=srpt_path,
                         outputfile=sout_path,
                         swmm_lib_path=sdll_path)
        # simulate the loaded model
        loginfo(
            "Executing SWMM simmulation with no interaction. Input from <" +
            sinp_path + ">. Will store output in <" + sout_path + ">.")
        # use for-loop to avoid runtime costs due to excessive logging
        with sim as s:
            for step in s:
                pass

        # #### Get the info to a safe place and then clean up

        # In[18]:

        # extract swmm outputs with swmmtoolbox and delete expensive binary files
        lab1, lab2 = 'subcatchment,,Runoff_rate', 'subcatchment,,Bifenthrin'
        runf = swmmtoolbox.extract(sout_path, lab1)
        bif = swmmtoolbox.extract(sout_path, lab2)

        # In[19]:

        # clean up
        if swmm_cleanup == 'full':
            loginfo("Deleting swmm temp files to free up memory.")
            rm(os.path.join(sdir_path, "*"))
        elif swmm_cleanup == 'some':
            loginfo("Deleting large swmm temp files to free up memory.")
            rm(sout_path, sdll_path)
        elif swmm_cleanup == 'none':
            loginfo("Deleting swmm dll file to free up memory.")
            rm(sdll_path)

        # In[20]:

        # compute daily averages
        runf = runf.resample('D').mean()
        bif = bif.resample('D').mean()

        # In[21]:

        # unit conversion for vvwm: runoff and bifenthrin
        ## Runoff
        ### 1. multiply by 86400 to convert days into seconds
        ### 2. multiply by 0.01 to convert square meters to hectares
        ### 3. divide by area of subcatchment (hectares) to get per-hectare volumes
        runf = runf.mul(86400).mul(0.01).div(sub_list_area)
        ## Bifenthrin
        ### multiply by runoff volume to get ????
        bif = bif.mul(runf.values)

        # In[22]:

        for o in outfalls:
            # create outfall directory path
            outfall_dir = os.path.join(sdir_path, o)

            # make the directory for which we just constructed a path
            if not os.path.exists(outfall_dir):
                os.mkdir(outfall_dir)
                print("Folder ", sid, o, " created", "\n")
            else:
                print("Folder ", sid, o, "already exists")

            # create a vector to strain the outfall[o]-specific subcatchments out of the 113 subcatchments
            # weights vector has 1 at each index corresponding to a subcatchment of outfall[o], and 0 at the rest
            weights = np.array([(1 if x in sub_ids[o] else 0)
                                for x in range(113)])
            # we want the daily totals of the runoff and bifenthrin within outfall[o]
            # use dot product and the weights vector to evaluate this on the df
            runf_sum = runf @ weights
            bif_sum = bif @ weights

            # we want to combine tables and add filler and date-part columns
            vvwm_df = pd.DataFrame({
                "year": runf_sum.index.year,
                "month": runf_sum.index.month,
                "day": runf_sum.index.day,
                "runf_sum": runf_sum,
                "B": 0,
                "bif_sum": bif_sum,
                "MEp": 0
            })

            # make swmm output data into vvwm input file
            outfall_path = os.path.join(outfall_dir, "output.zts")
            # read out into comma-delimited .zts file
            vvwm_df.to_csv(outfall_path, header=False, index=False, sep=',')

            # for this to work, we need to write 3 blank lines to the beginning
            with open(outfall_path, "r") as read_file:
                filelines = read_file.readlines()
            # write this back into it with 3 lines added to the beginning
            with open(outfall_path, "w") as write_file:
                # write blanks to file
                write_file.write('\n\n\n')
                # append original lines to file after blank lines
                write_file.writelines(filelines)

        # In[23]:

        # create a blank df to populate
        output_df = pd.DataFrame()

        for o in outfalls:
            # create outfall directory path and outfall file path
            outfall_dir = os.path.join(sdir_path, o)
            outfall_file = os.path.join(outfall_dir, "vvwmTransfer.txt")

            # make vvwm setup file for outfall o:
            with open(vvwmTransfer_path, "r") as read_file:
                filelines = read_file.readlines()

            # update parameter values
            for c, param in enumerate(list(vvwm_keys)[0:6]):
                filelines[c + 4] = str(vvwm_params[param]) + "\n"
            filelines[11] = str(vvwm_params[vvwm_keys[6]]) + "\n"
            filelines[17] = str(vvwm_params[vvwm_keys[7]]) + "\n"
            for c, param in enumerate(list(vvwm_keys)[8:14]):
                filelines[c + 40] = str(vvwm_params[param]) + "\n"
            for c, param in enumerate(list(vvwm_keys)[14:18]):
                filelines[c + 47] = str(vvwm_params[param]) + "\n"

            # Update path to (swmm) output (aka, vvwm input) data file
            filelines[0] = os.path.join(outfall_dir, "output") + '\n'
            # Update path to weather file
            filelines[29] = os.path.join(outfall_dir, "vvwm_wet.dvf") + '\n'
            # Insert paths that don't and won't exist, but need to be included as a technicality or else the model won't run
            filelines[68] = os.path.join(
                outfall_dir, "output_NPlesant_Custom_parent_daily.csv") + '\n'
            filelines[69] = os.path.join(
                outfall_dir, "output_NPlesant_Custom_deg1_daily.csv") + '\n'
            filelines[70] = os.path.join(
                outfall_dir, "output_NPlesant_Custom_deg2_daily.csv") + '\n'
            filelines[71] = os.path.join(
                outfall_dir,
                "output_NPlesant_Custom_parent_analysis.txt") + '\n'
            filelines[72] = os.path.join(
                outfall_dir, "output_NPlesant_Custom_deg1_analysis.txt") + '\n'
            filelines[73] = os.path.join(
                outfall_dir, "output_NPlesant_Custom_deg2_analysis.txt") + '\n'
            filelines[74] = os.path.join(
                outfall_dir, "output_NPlesant_Custom_parent_deem.rdf") + '\n'
            filelines[75] = os.path.join(
                outfall_dir, "output_NPlesant_Custom_deg1_deem.rdf") + '\n'
            filelines[76] = os.path.join(
                outfall_dir, "output_NPlesant_Custom_deg2_deem.rdf") + '\n'
            filelines[77] = os.path.join(
                outfall_dir,
                "output_NPlesant_Custom_parent_calendex.rdf") + '\n'
            filelines[78] = os.path.join(
                outfall_dir, "output_NPlesant_Custom_deg1_calendex.rdf") + '\n'
            filelines[79] = os.path.join(
                outfall_dir, "output_NPlesant_Custom_deg2_calendex.rdf") + '\n'
            filelines[80] = os.path.join(
                outfall_dir, "output_NPlesant_Custom_parent_esa.txt") + '\n'
            filelines[81] = os.path.join(
                outfall_dir, "output_NPlesant_Custom_deg1_esa.txt") + '\n'
            filelines[82] = os.path.join(
                outfall_dir, "output_NPlesant_Custom_deg2_esa.txt") + '\n'

            with open(outfall_file, "w") as write_file:
                # write out file
                write_file.writelines(filelines)

            # copy weather file into new file location
            if mode == 'debug':
                old_wet_path = os.path.join(main_path, "master_debug",
                                            "vvwm_wet.dvf")
            elif mode == 'test':
                old_wet_path = os.path.join(main_path, "master_test",
                                            "vvwm_wet.dvf")
            elif mode == 'run':
                old_wet_path = os.path.join(weather_path, "vvwm_wet.dvf")
            new_wet_path = os.path.join(outfall_dir, "vvwm_wet.dvf")
            shutil.copyfile(old_wet_path, new_wet_path)

            # copy exe into new file location
            if sys.platform == "linux" or sys.platform == "linux2":
                exe_bn = "vvwm"
            elif sys.platform == "win32":
                exe_bn = "VVWM.exe"
            old_exe_path = os.path.join(exe_path, exe_bn)
            new_exe_path = os.path.join(outfall_dir, exe_bn)
            shutil.copyfile(old_exe_path, new_exe_path)

            # run vvwm.exe (vvwm.exe [...]/outfall_31_xx/vvwmTransfer.txt)
            command = [new_exe_path, outfall_file]
            subprocess.call(command)

            # simulated data vs field data units conversion:
            ## Simulation data conc: units = kg/m^3
            ## Observed data conc: units = micrograms/L
            # unit conversion: (meters^3 / liter) & (micrograms / kilogram)
            m3_per_L, mcrg_per_kg = 0.001, 1000000000

            # read in produced data from the output of the vvwm run we just completed
            output = pd.read_csv(filelines[68][:-1],
                                 usecols=[1],
                                 skiprows=5,
                                 names=["davg_bif_conc"
                                        ]) * m3_per_L * mcrg_per_kg
            ## Conversion process: Kg/m^3 * 1m^3/1000L * 1000000000 micrograms/kg = 1000000 * 1microgram/L

            # add a date column
            if mode == 'debug':
                output['Sample_date'] = pd.date_range(start='1/1/2009',
                                                      periods=103,
                                                      freq='D')
            elif mode == 'test':
                output['Sample_date'] = pd.date_range(start='1/1/2009',
                                                      periods=778,
                                                      freq='D')
            elif mode == 'run':
                output['Sample_date'] = pd.date_range(start='1/1/2009',
                                                      periods=3287,
                                                      freq='D')
            # add a location column
            output['Site_code'] = o[-5:]
            # sift out only the rows with dates and sites that there is also field data available for
            output = output.merge(obs_data,
                                  how="inner",
                                  on=['Sample_date', 'Site_code'])
            # change index to a (date, site) pair
            output.set_index([
                np.datetime_as_string(output.Sample_date, unit='D'),
                'Site_code'
            ],
                             inplace=True)
            # add this df to the df that contains the dfs of all outfalls
            output_df = output_df.append(output[[
                'davg_bif_conc',
            ]],
                                         ignore_index=False)

        # label and sort by date and site
        output_df = output_df.set_index(
            [["_".join([a, b[3:]]) for a, b in output_df.index]]).sort_index()

        # In[24]:

        # convert data frame to dictionary
        output_dict = output_df.to_dict()['davg_bif_conc']

        # In[25]:

        # cleanup
        if vvwm_cleanup == 'none' or vvwm_cleanup == 'some' or vvwm_cleanup == 'full':
            exe_ = os.path.join(sdir_path, "outfall_31_??", exe_bn)
            wet_ = os.path.join(sdir_path, "outfall_31_??", "vvwm_wet.dvf")
            loginfo("Deleting vvwm exe and weather files to free up memory.")
            rm(exe_, wet_)
        if vvwm_cleanup == 'some' or vvwm_cleanup == 'full':
            zts_ = os.path.join(sdir_path, "outfall_31_??", "output.zts")
            analysis_ = os.path.join(
                sdir_path, "outfall_31_??",
                "output_NPlesant_Custom_parent_analysis.txt")
            transfer_ = os.path.join(sdir_path, "outfall_31_??",
                                     "vvwmTransfer.txt")
            loginfo("Deleting internal vvwm files to free up memory.")
            rm(zts_, analysis_, transfer_)
        if vvwm_cleanup == 'full':
            ofdir_ = os.path.join(sdir_path, "outfall_31_??")
            loginfo(
                "Deleting vvwm results files and folder to free up memory.")
            rm(ofdir_)
        if vvwm_cleanup == 'full' and swmm_cleanup == 'full':
            loginfo("Deleting temp folder.")
            rm(sdir_path)

        # In[26]:

        return (output_dict)
Example No. 12
import pandas as pd
from swmmtoolbox import swmmtoolbox as SW

swmm_out_filename = "RR_HD_WQ.out"
csv_filename = "RR_HD_WQ.csv"

f = SW.SwmmExtract(swmm_out_filename)
type_mydic = dict(zip(range(len(f.itemlist)), f.itemlist))
for num, item in type_mydic.items():
    print(num,item)
print("请输入模型对象序号")
type_number = int(input())
if len(f.name[type_number])==0:
    print("结果中不存在%s对象,请重新输入模型对象序号:"%(type_mydic[type_number]))
    print("-"*20)
    type_number = int(input())
    print("-"* 20)
for key,val in f.varcode[type_number].items():
    print(key,val)
print("请输入结果类型序号:")
item_number = int(input())
print("-"*20)

data = SW.extract(swmm_out_filename, type_mydic[type_number] + ',,' + f.varcode[type_number][item_number])
name_list = [
    col.replace(type_mydic[type_number] + '_', '').replace(
        '_' + f.varcode[type_number][item_number], '')
    for col in data.columns
]

data.columns = name_list
frame = data[0:1].copy()
frame.index = [f.startdate]
data = pd.concat([frame, data])
data.to_csv(csv_filename.replace(".csv", "_" + f.varcode[type_number][item_number] + ".csv"))

print("结果输出完成")
Example No. 13
import os
import numpy as np
import pandas as pd
from swmmtoolbox import swmmtoolbox

output_dir = '../data/out'

data = {}

for fn in os.listdir(output_dir):
    if fn.endswith('.out'):
        basename = fn.split('.out')[0]
        outfall = swmmtoolbox.extract('../data/out/{0}'.format(fn),
                                      'system,Flow_leaving_outfalls,11')
        data[basename] = outfall

for fn in data:
    data[fn].columns = [fn]

df = pd.concat(data.values(), axis=1).sort_index(axis=1)
df.to_csv('../data/all_outflows.csv')