def organize_independent_networks(connections):

    rconn = nhd_network.reverse_network(connections)
    independent_networks = nhd_network.reachable_network(rconn)
    reaches_bytw = {}
    for tw, net in independent_networks.items():
        path_func = partial(nhd_network.split_at_junction, net)
        reaches_bytw[tw] = nhd_network.dfs_decomposition(net, path_func)

    return independent_networks, reaches_bytw, rconn
Example #2

def organize_independent_networks(connections, wbodies=None):

    rconn = nhd_network.reverse_network(connections)
    independent_networks = nhd_network.reachable_network(rconn)
    reaches_bytw = {}
    for tw, net in independent_networks.items():
        if wbodies:
            path_func = partial(
                nhd_network.split_at_waterbodies_and_junctions, set(wbodies), net
            )
        else:
            path_func = partial(nhd_network.split_at_junction, net)

        reaches_bytw[tw] = nhd_network.dfs_decomposition(net, path_func)

    return independent_networks, reaches_bytw, rconn
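
A hedged usage sketch for the waterbody-aware variant above (the toy connections dict and the waterbody ID are hypothetical; nhd_network is the t-route module these examples import):

# toy network: 1 -> 2 -> 4, 3 -> 4, 4 -> outlet; segment 3 is a waterbody
connections = {1: [2], 2: [4], 3: [4], 4: []}
nets, reaches_bytw, rconn = organize_independent_networks(connections, wbodies=[3])
# reaches_bytw maps each tailwater ID to its list of reaches, split at
# junctions and (in the wbodies variant) at waterbody segments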
Example #3
def main():

    args = _handle_args()

    next_gen_input_folder = test_folder.joinpath("input", "next_gen")
    if args.input:
        next_gen_input_folder = pathlib.Path(args.input)

    # The following 2 values are currently hard coded for this test domain
    nts = 720  # number of timesteps (720 * 300 s model timestep = 216,000 s = 2.5 days)
    dt_mc = 300.0  # time interval for MC

    # Currently tested on the Sugar Creek domain
    ngen_network_df = nhd_io.read_geopandas(args.supernetwork)
    if args.subset:
        ngen_network_df = ngen_network_df[
            ngen_network_df['realized_catchment'].isin(args.subset)]

    # Create a dictionary mapping each catchment ID to its downstream ID
    ngen_network_dict = dict(zip(ngen_network_df.id, ngen_network_df.toid))

    #ngen_network_dict = dict(zip(ngen_network_df.ID, ngen_network_df.toID))

    def node_key_func(x):
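        # strip the assumed 3-character prefix from the string ID and keep the
        # integer, e.g. "wb-12" -> 12; a longer prefix such as "cat-" would
        # need a different slice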
        return int(x[3:])

    # Extract the ID integer values
    waterbody_connections = {
        node_key_func(k): node_key_func(v)
        for k, v in ngen_network_dict.items()
    }

    # Convert dictionary connections to data frame and make ID column the index
    waterbody_df = pd.DataFrame.from_dict(waterbody_connections,
                                          orient='index',
                                          columns=['to'])
    # Sort ID index column
    waterbody_df = waterbody_df.sort_index()

    waterbody_df = nhd_io.replace_downstreams(waterbody_df, "to", 0)

    connections = nhd_network.extract_connections(waterbody_df, "to")

    # Read and convert catchment lateral flows to format that can be processed by compute_network
    qlats = next_gen_io.read_catchment_lateral_flows(next_gen_input_folder)
    print(qlats)
    rconn = nhd_network.reverse_network(connections)

    subnets = nhd_network.reachable_network(rconn, check_disjoint=False)

    # read the routelink file
    nhd_routelink = nhd_io.read_netcdf("data/RouteLink_NHDPLUS.nc")
    nhd_routelink['dt'] = 300.0

    nhd_routelink.set_index("link", inplace=True)

    routelink_cols = {
        "downstream": "to",
        "dx": "Length",
        "n": "n",
        "ncc": "nCC",
        "s0": "So",
        "bw": "BtmWdth",
        "tw": "TopWdth",
        "twcc": "TopWdthCC",
        "waterbody": "NHDWaterbodyComID",
        "musk": "MusK",
        "musx": "MusX",
        "cs": "ChSlp",
    }

    routelink_cols = {value: key for key, value in routelink_cols.items()}

    nhd_routelink.rename(columns=routelink_cols, inplace=True)

    with open(next_gen_input_folder / 'coarse/crosswalk.json') as f:
        crosswalk_data = json.load(f)
    waterbody_df['comid'] = waterbody_df.apply(
        lambda x: crosswalk_data['cat-' + str(x.name)]['outlet_COMID'], axis=1)

    waterbody_df = waterbody_df.join(nhd_routelink, on='comid', how='left')

    del nhd_routelink

    # initial conditions, assume to be zero
    # TO DO: Allow optional reading of initial conditions from WRF
    q0 = pd.DataFrame(0,
                      index=waterbody_df.index,
                      columns=["qu0", "qd0", "h0"],
                      dtype="float32")

    #Set types as float32
    waterbody_df = waterbody_df.astype({
        "dt": "float32",
        "bw": "float32",
        "tw": "float32",
        "twcc": "float32",
        "dx": "float32",
        "n": "float32",
        "ncc": "float32",
        "cs": "float32",
        "s0": "float32"
    })

    subreaches = {}

    for tw, net in subnets.items():
        path_func = partial(nhd_network.split_at_junction, net)
        subreaches[tw] = nhd_network.dfs_decomposition(net, path_func)

    results = []
    for twi, (tw, reach) in enumerate(subreaches.items(), 1):
        r = list(chain.from_iterable(reach))
        data_sub = waterbody_df.loc[
            r, ['dt', 'bw', 'tw', 'twcc', 'dx', 'n', 'ncc', 'cs', 's0']
        ].sort_index()
        #data_sub = waterbody_df.loc[r, ['dt', 'bw', 'tw', 'twcc', 'dx', 'n', 'ncc', 'cs', 's0']]
        qlat_sub = qlats.loc[r].sort_index()
        q0_sub = q0.loc[r].sort_index()

        results.append(
            mc_reach.compute_network(nts, reach, subnets[tw],
                                     data_sub.index.values,
                                     data_sub.columns.values, data_sub.values,
                                     qlat_sub.values, q0_sub.values))

    fdv_columns = pd.MultiIndex.from_product(
        [range(nts), ['q', 'v', 'd']]
    ).to_flat_index()
    flowveldepth = pd.concat(
        [pd.DataFrame(d, index=i, columns=fdv_columns) for i, d in results],
        copy=False)
    flowveldepth = flowveldepth.sort_index()
    outfile_base_name = (args.supernetwork).split(".")[0]
    flowveldepth.to_csv(f"{outfile_base_name}_mc_results.csv")
    print(flowveldepth)
Example #4
def diffusive_input_data_v02(tw, connections, rconn, reach_list,
                             diffusive_parameters, geo_cols, geo_index,
                             geo_data, qlat_data, initial_conditions,
                             upstream_results, qts_subdivisions, nsteps):
    """
    Build input data objects for diffusive wave model
    
    Parameters
    ----------
    tw -- (int) Tailwater segment ID
    connections -- (dict) downstream connections for each segment in the network
    rconn -- (dict) upstream connections for each segment in the network
    reach_list -- (list of lists) lists of segments comprising different reaches in the network
    diffusive_parameters -- (dict) diffusive wave model parameters
    geo_cols -- (ndarray of strs) column headers for the geomorphic parameters data array (geo_data)
    geo_index -- (ndarray of int64s) row indices for the geomorphic parameters data array (geo_data)
    geo_data -- (ndarray of float32s) geomorphic parameters data array
    qlat_data -- (ndarray of float32s) qlateral data (m3/sec)
    initial_conditions -- (ndarray of float32s) initial flow (m3/sec) and depth (m above channel bottom) states for network nodes
    upstream_results -- (dict) 1d arrays of upstream flow, velocity, and depth
    qts_subdivisions -- (int) number of qlateral timestep subdivisions
    nsteps -- (int) number of simulation timesteps

    Returns
    -------
    diff_ins -- (dict) formatted inputs for diffusive wave model
    """

    # diffusive time steps info.
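    # geo_data[0, 0] is the routing timestep dt (s): dt is assumed to be the
    # first geo column and uniform across segments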
    dt_ql_g = geo_data[0, 0] * qts_subdivisions
    # TODO: make the following two timesteps the same as the simulation timestep
    dt_ub_g = geo_data[0, 0] * qts_subdivisions
    dt_db_g = geo_data[0, 0] * qts_subdivisions
    saveinterval_g = geo_data[0, 0]
    saveinterval_ev_g = geo_data[0, 0]
    dtini_g = geo_data[0, 0]
    t0_g = 0.0  # simulation start hr **set to zero for Fortran computation
    tfin_g = (geo_data[0, 0] * nsteps) / 60 / 60

    # USGS data related info.
    usgsID = diffusive_parameters.get("usgsID", None)
    seg2usgsID = diffusive_parameters.get("link2usgsID", None)
    usgssDT = diffusive_parameters.get("usgs_start_date", None)
    usgseDT = diffusive_parameters.get("usgs_end_date", None)
    usgspCd = diffusive_parameters.get("usgs_parameterCd", None)

    # diffusive parameters
    cfl_g = diffusive_parameters.get("courant_number_upper_limit", None)
    theta_g = diffusive_parameters.get("theta_parameter", None)
    tzeq_flag_g = diffusive_parameters.get("chgeo_computation_flag", None)
    y_opt_g = diffusive_parameters.get("water_elevation_computation_flag", None)
    so_llm_g = diffusive_parameters.get("bed_slope_lower_limit", None)

    # number of reaches in network
    nrch_g = len(reach_list)

    # maximum number of nodes in a reach
    mxncomp_g = 0
    for r in reach_list:
        nnodes = len(r) + 1
        if nnodes > mxncomp_g:
            mxncomp_g = nnodes

    ds_seg = []
    offnet_wbodies = []
    upstream_flow_array = np.zeros((len(ds_seg), np.shape(qlat_data)[1]))
    if upstream_results:
        # create a list of segments downstream of reservoirs
        inv_map = nhd_network.reverse_network(rconn)
        for wbody_id in upstream_results:
            ds_seg.append(inv_map[wbody_id][0])
            offnet_wbodies.append(wbody_id)
        # build array of flow reservoir outflow
        upstream_flow_array = np.zeros((len(ds_seg), np.shape(qlat_data)[1]))
        for j, wbody_id in enumerate(upstream_results):
            tmp = upstream_results[wbody_id]
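            # "results" interleaves (flow, velocity, depth) per timestep, so
            # [::3] extracts the flow series; it is then subsampled to the
            # qlateral timestep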
            for i, val in enumerate(tmp["results"][::3]):
                if i % qts_subdivisions == 0:
                    upstream_flow_array[j, int(i / qts_subdivisions)] = val

    # Order reaches by junction depth
    path_func = partial(nhd_network.split_at_waterbodies_and_junctions,
                        set(offnet_wbodies), rconn)
    tr = nhd_network.dfs_decomposition_depth_tuple(rconn, path_func)

    jorder_reaches = sorted(tr, key=lambda x: x[0])
    mx_jorder = max(jorder_reaches)[0]  # maximum junction order of the tailwater subnetwork

    ordered_reaches = {}
    rchhead_reaches = {}
    rchbottom_reaches = {}
    z_all = {}
    for o, rch in jorder_reaches:

        # append one more (fake) segment to the end of the segment list to account for the node configuration.
        fksegID = int(str(rch[-1]) + str(2))
        rch.append(fksegID)

        # fake IDs for the bottom segments of upstream reaches (skipped when an upstream neighbor is an off-network waterbody)
        if any(j in rconn[rch[0]] for j in offnet_wbodies):
            fk_usbseg = []
        else:
            fk_usbseg = [int(str(x) + str(2)) for x in rconn[rch[0]]]

        if o not in ordered_reaches:
            ordered_reaches.update({o: []})
        ordered_reaches[o].append([
            rch[0],
            {
                "number_segments": len(rch),
                "segments_list": rch,
                "upstream_bottom_segments": fk_usbseg,
                "downstream_head_segment": connections[rch[-2]],
            },
        ])

        if rch[0] not in rchhead_reaches:
            # a list of segments for a given head segment
            rchhead_reaches.update(
                {rch[0]: {
                     "number_segments": len(rch),
                     "segments_list": rch
                 }})
            # a list of segments for a given bottom segment
            rchbottom_reaches.update(
                {rch[-1]: {
                     "number_segments": len(rch),
                     "segments_list": rch
                 }})
        # for channel altitude adjustment
        z_all.update({seg: {"adj.alt": np.zeros(1)} for seg in rch})

    # channel geometry data: convert channel side slope (cs) to its reciprocal.
    # Done once, outside the loop; inverting on every reach iteration would
    # repeatedly flip the values back and forth.
    a = np.where(geo_cols == "cs")
    geo_data[:, a] = 1.0 / geo_data[:, a]

    # --------------------------------------------------------------------------------------
    #                                 Step 0-3
    #    Adjust altitude so that altitude of the last sement of a reach is equal to that
    #    of the first segment of its downstream reach right after their common junction.
    # --------------------------------------------------------------------------------------
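    # fake segment ID for the downstream boundary, formed by appending the
    # digit "2" to the tailwater ID (the same convention as fksegID above)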
    dbfksegID = int(str(tw) + str(2))

    adj_alt1(mx_jorder, ordered_reaches, geo_cols, geo_index, geo_data,
             dbfksegID, z_all)

    # --------------------------------------------------------------------------------------
    #                                 Step 0-4
    #     Make Fortran-Python channel network mapping variables.
    # --------------------------------------------------------------------------------------

    # build a list of head segments in descending reach order [headwater -> tailwater]
    pynw = {}
    frj = -1
    for x in range(mx_jorder, -1, -1):
        for head_segment, reach in ordered_reaches[x]:
            frj = frj + 1
            pynw[frj] = head_segment

    frnw_col = diffusive_parameters.get("fortran_nework_map_col_number", None)
    frnw_g = fp_network_map(mx_jorder, ordered_reaches, rchbottom_reaches,
                            nrch_g, frnw_col, dbfksegID, pynw)

    # convert data type from integer to float for frnw
    dfrnw_g = np.zeros((nrch_g, frnw_col), dtype=float)
    for j in range(0, nrch_g):
        for col in range(0, frnw_col):
            dfrnw_g[j, col] = float(frnw_g[j, col])

    # ---------------------------------------------------------------------------------
    #                              Step 0-5
    #                  Prepare channel geometry data
    # ---------------------------------------------------------------------------------
    (
        z_ar_g,
        bo_ar_g,
        traps_ar_g,
        tw_ar_g,
        twcc_ar_g,
        mann_ar_g,
        manncc_ar_g,
        so_ar_g,
        dx_ar_g,
    ) = fp_chgeo_map(
        mx_jorder,
        ordered_reaches,
        geo_cols,
        geo_index,
        geo_data,
        z_all,
        mxncomp_g,
        nrch_g,
    )

    # ---------------------------------------------------------------------------------
    #                              Step 0-6
    #                  Prepare initial conditions data
    # ---------------------------------------------------------------------------------
    iniq = np.zeros((mxncomp_g, nrch_g))
    frj = -1
    for x in range(mx_jorder, -1, -1):
        for head_segment, reach in ordered_reaches[x]:
            seg_list = reach["segments_list"]
            ncomp = reach["number_segments"]
            frj = frj + 1
            for seg in range(0, ncomp):
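                # the bottom node is the fake segment appended earlier; reuse
                # the initial flow of the true bottom segment for it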
                if seg == ncomp - 1:
                    segID = seg_list[seg - 1]
                else:
                    segID = seg_list[seg]

                idx_segID = np.where(geo_index == segID)
                iniq[seg, frj] = initial_conditions[idx_segID, 0]

    # ---------------------------------------------------------------------------------
    #                              Step 0-7
    #                  Prepare lateral inflow data
    # ---------------------------------------------------------------------------------
    nts_ql_g = int(
        (tfin_g - t0_g) * 3600.0 / dt_ql_g
    )  # total number of timesteps of lateral flow data

    qlat_g = np.zeros((nts_ql_g, mxncomp_g, nrch_g))

    fp_qlat_map(
        mx_jorder,
        ordered_reaches,
        nts_ql_g,
        geo_cols,
        geo_index,
        geo_data,
        qlat_data,
        qlat_g,
    )

    # ---------------------------------------------------------------------------------
    #                              Step 0-8
    #       Prepare upstream boundary (top segments of head basin reaches) data
    # ---------------------------------------------------------------------------------
    nts_ub_g = nts_ql_g
    ubcd_g = fp_ubcd_map(frnw_g, pynw, nts_ub_g, nrch_g, ds_seg,
                         upstream_flow_array)

    # ---------------------------------------------------------------------------------
    #                              Step 0-9
    #       Prepare downstream boundary (bottom segments of TW reaches) data
    # ---------------------------------------------------------------------------------
    if seg2usgsID:
        if tw in seg2usgsID:
            ipos = seg2usgsID.index(tw)
            usgsID2tw = usgsID[ipos]
        else:
            usgsID2tw = None
    else:
        usgsID2tw = None

    nts_db_g, dbcd_g = fp_dbcd_map(usgsID2tw, usgssDT, usgseDT, usgspCd)

    # ---------------------------------------------------------------------------------
    #                              Step 0-10
    #                 Prepare uniform flow lookup tables
    # ---------------------------------------------------------------------------------

    nhincr_m_g = diffusive_parameters.get(
        "normaldepth_lookuptable_main_increment_number", None)
    nhincr_f_g = diffusive_parameters.get(
        "normaldepth_lookuptable_floodplain_increment_number", None)
    timesdepth_g = diffusive_parameters.get(
        "normaldepth_lookuptable_depth_multiplier", None)
    ufqlt_m_g = np.zeros((mxncomp_g, nrch_g, nhincr_m_g))
    ufhlt_m_g = np.zeros((mxncomp_g, nrch_g, nhincr_m_g))
    ufqlt_f_g = np.zeros((mxncomp_g, nrch_g, nhincr_f_g))
    ufhlt_f_g = np.zeros((mxncomp_g, nrch_g, nhincr_f_g))

    # TODO: Call uniform flow lookup table creation kernel

    # ---------------------------------------------------------------------------------
    #                              Step 0-11
    #                       Build input dictionary
    # ---------------------------------------------------------------------------------
    ntss_ev_g = int((tfin_g - t0_g) * 3600.0 / saveinterval_ev_g)

    # build a dictionary of diffusive model inputs and helper variables
    diff_ins = {}

    # model input parameters
    diff_ins["dtini_g"] = dtini_g
    diff_ins["t0_g"] = t0_g
    diff_ins["tfin_g"] = tfin_g
    diff_ins["saveinterval_g"] = saveinterval_g
    diff_ins["saveinterval_ev_g"] = saveinterval_ev_g
    diff_ins["dt_ql_g"] = dt_ql_g
    diff_ins["dt_ub_g"] = dt_ub_g
    diff_ins["dt_db_g"] = dt_db_g
    diff_ins["nts_ql_g"] = nts_ql_g
    diff_ins["nts_ub_g"] = nts_ub_g
    diff_ins["nts_db_g"] = nts_db_g
    diff_ins["mxncomp_g"] = mxncomp_g
    diff_ins["nrch_g"] = nrch_g
    diff_ins["z_ar_g"] = z_ar_g
    diff_ins["bo_ar_g"] = bo_ar_g
    diff_ins["traps_ar_g"] = traps_ar_g
    diff_ins["tw_ar_g"] = tw_ar_g
    diff_ins["twcc_ar_g"] = twcc_ar_g
    diff_ins["mann_ar_g"] = mann_ar_g
    diff_ins["manncc_ar_g"] = manncc_ar_g
    diff_ins["so_ar_g"] = so_ar_g
    diff_ins["dx_ar_g"] = dx_ar_g
    diff_ins["nhincr_m_g"] = nhincr_m_g
    diff_ins["nhincr_f_g"] = nhincr_f_g
    diff_ins["ufhlt_m_g"] = ufhlt_m_g
    diff_ins["ufqlt_m_g"] = ufqlt_m_g
    diff_ins["ufhlt_f_g"] = ufhlt_f_g
    diff_ins["ufqlt_f_g"] = ufqlt_f_g
    diff_ins["frnw_col"] = frnw_col
    diff_ins["frnw_g"] = frnw_g
    diff_ins["qlat_g"] = qlat_g
    diff_ins["ubcd_g"] = ubcd_g
    diff_ins["dbcd_g"] = dbcd_g
    diff_ins["cfl_g"] = cfl_g
    diff_ins["theta_g"] = theta_g
    diff_ins["tzeq_flag_g"] = tzeq_flag_g
    diff_ins["y_opt_g"] = y_opt_g
    diff_ins["so_llm_g"] = so_llm_g
    diff_ins["ntss_ev_g"] = ntss_ev_g
    diff_ins["iniq"] = iniq

    # python-fortran crosswalk data
    diff_ins["pynw"] = pynw
    diff_ins["ordered_reaches"] = ordered_reaches

    return diff_ins
Example #5
def main():

    args = _handle_args()

    nts = args.nts
    debuglevel = -1 * args.debuglevel
    verbose = args.verbose
    showtiming = args.showtiming
    supernetwork = args.supernetwork
    break_network_at_waterbodies = args.break_network_at_waterbodies
    csv_output_folder = args.csv_output_folder
    assume_short_ts = args.assume_short_ts

    test_folder = pathlib.Path(root, "test")
    geo_input_folder = test_folder.joinpath("input", "geo")

    # TODO: Make these commandline args
    """##NHD Subset (Brazos/Lower Colorado)"""
    # supernetwork = 'Brazos_LowerColorado_Named_Streams'
    # supernetwork = 'Brazos_LowerColorado_ge5'
    # supernetwork = 'Pocono_TEST1'
    """##NHD CONUS order 5 and greater"""
    # supernetwork = 'CONUS_ge5'
    """These are large -- be careful"""
    # supernetwork = 'Mainstems_CONUS'
    # supernetwork = 'CONUS_FULL_RES_v20'
    # supernetwork = 'CONUS_Named_Streams' #create a subset of the full resolution by reading the GNIS field
    # supernetwork = 'CONUS_Named_combined' #process the Named streams through the Full-Res paths to join the many hanging reaches

    if verbose:
        print("creating supernetwork connections set")
    if showtiming:
        start_time = time.time()

    # STEP 1
    network_data = nnu.set_supernetwork_data(
        supernetwork=args.supernetwork,
        geo_input_folder=geo_input_folder,
        verbose=False,
        debuglevel=debuglevel,
    )

    cols = network_data["columns"]
    param_df = nhd_io.read(network_data["geo_file_path"])
    param_df = param_df[list(cols.values())]
    param_df = param_df.set_index(cols["key"])

    if "mask_file_path" in network_data:
        data_mask = nhd_io.read_mask(
            network_data["mask_file_path"],
            layer_string=network_data["mask_layer_string"],
        )
        param_df = param_df.filter(data_mask.iloc[:, network_data["mask_key"]], axis=0)

    param_df = param_df.sort_index()
    param_df = nhd_io.replace_downstreams(param_df, cols["downstream"], 0)

    if args.ql:
        qlats = nhd_io.read_qlat(args.ql)
    else:
        qlats = constant_qlats(param_df, nts, 10.0)

    # initial conditions, assume to be zero
    # TO DO: Allow optional reading of initial conditions from WRF
    q0 = pd.DataFrame(
        0, index=param_df.index, columns=["qu0", "qd0", "h0"], dtype="float32"
    )

    connections = nhd_network.extract_connections(param_df, cols["downstream"])
    wbodies = nhd_network.extract_waterbodies(
        param_df, cols["waterbody"], network_data["waterbody_null_code"]
    )

    if verbose:
        print("supernetwork connections set complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 2
    if showtiming:
        start_time = time.time()
    if verbose:
        print("organizing connections into reaches ...")

    rconn = nhd_network.reverse_network(connections)
    independent_networks = nhd_network.reachable_network(rconn)
    reaches_bytw = {}
    for tw, net in independent_networks.items():
        path_func = partial(nhd_network.split_at_junction, net)
        reaches_bytw[tw] = nhd_network.dfs_decomposition(net, path_func)

    if verbose:
        print("reach organization complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    if showtiming:
        start_time = time.time()

    param_df["dt"] = 300.0
    param_df = param_df.rename(columns=nnu.reverse_dict(cols))
    param_df = param_df.astype("float32")

    # datasub = data[['dt', 'bw', 'tw', 'twcc', 'dx', 'n', 'ncc', 'cs', 's0']]

    parallel_compute_method = args.parallel_compute_method
    cpu_pool = args.cpu_pool
    compute_method = args.compute_method

    if compute_method == "standard cython compute network":
        compute_func = mc_reach.compute_network
    else:
        compute_func = mc_reach.compute_network

    if parallel_compute_method == "by-network":
        with Parallel(n_jobs=cpu_pool, backend="threading") as parallel:
            jobs = []
            for twi, (tw, reach_list) in enumerate(reaches_bytw.items(), 1):
                r = list(chain.from_iterable(reach_list))
                param_df_sub = param_df.loc[
                    r, ["dt", "bw", "tw", "twcc", "dx", "n", "ncc", "cs", "s0"]
                ].sort_index()
                qlat_sub = qlats.loc[r].sort_index()
                q0_sub = q0.loc[r].sort_index()
                jobs.append(
                    delayed(compute_func)(
                        nts,
                        reach_list,
                        independent_networks[tw],
                        param_df_sub.index.values,
                        param_df_sub.columns.values,
                        param_df_sub.values,
                        qlat_sub.values,
                        q0_sub.values,
                    )
                )
            results = parallel(jobs)

    else:  # Execute in serial
        results = []
        for twi, (tw, reach_list) in enumerate(reaches_bytw.items(), 1):
            r = list(chain.from_iterable(reach_list))
            param_df_sub = param_df.loc[
                r, ["dt", "bw", "tw", "twcc", "dx", "n", "ncc", "cs", "s0"]
            ].sort_index()
            qlat_sub = qlats.loc[r].sort_index()
            q0_sub = q0.loc[r].sort_index()
            results.append(
                compute_func(
                    nts,
                    reach_list,
                    independent_networks[tw],
                    param_df_sub.index.values,
                    param_df_sub.columns.values,
                    param_df_sub.values,
                    qlat_sub.values,
                    q0_sub.values,
                )
            )

    if (debuglevel <= -1) or csv_output_folder:
        qvd_columns = pd.MultiIndex.from_product(
            [range(nts), ["q", "v", "d"]]
        ).to_flat_index()
        flowveldepth = pd.concat(
            [pd.DataFrame(d, index=i, columns=qvd_columns) for i, d in results],
            copy=False,
        )

        if csv_output_folder:
            flowveldepth = flowveldepth.sort_index()
            output_path = pathlib.Path(csv_output_folder).resolve()
            flowveldepth.to_csv(output_path.joinpath(f"{args.supernetwork}.csv"))

        if debuglevel <= -1:
            print(flowveldepth)

    if verbose:
        print("ordered reach computation complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))
Example #6
def test_reverse_network():
    connections = expected_connections
    rconn = nhd_network.reverse_network(connections)
    assert expected_rconn == rconn
    rrconn = nhd_network.reverse_network(rconn)
    assert rrconn == connections
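
The fixtures (expected_connections, expected_rconn) are defined elsewhere in the test module. A minimal self-contained sketch of the same round-trip property, using a hypothetical three-segment network:

def test_reverse_network_roundtrip_toy():
    # toy network: segments 1 and 2 both drain to segment 3 (hypothetical)
    toy = {1: [3], 2: [3], 3: []}
    toy_rconn = nhd_network.reverse_network(toy)
    # reversing twice should recover the original adjacency, as asserted above
    assert nhd_network.reverse_network(toy_rconn) == toy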