def get_network_data(network_name):
    # Create directory path variables for test/input/geo, where NHD data and masks are stored
    test_folder = pathlib.Path(root, "test").resolve()
    geo_input_folder = pathlib.Path(test_folder, "input", "geo").resolve()

    # Load network metadata for the Cape Fear Basin
    supernetwork = network_name
    network_data = nnu.set_supernetwork_data(
        supernetwork=supernetwork, geo_input_folder=geo_input_folder
    )

    # If the NHDPlus RouteLink file does not exist, download it.
    # Note: is_file() must be called; referencing the unbound method is always truthy.
    if not network_data["geo_file_path"].is_file():
        network_dl.download(network_data["geo_file_path"], network_data["data_link"])

    # Read in NHD data; retain a full copy for viz and full-network analysis purposes
    RouteLink = nhd_io.read(network_data["geo_file_path"])

    # Select only the necessary columns of geospatial data, set the DataFrame index
    cols = list(network_data["columns"].values())
    # Get the Strahler order data too
    cols.append("order")
    data = RouteLink[cols]
    data = data.set_index(network_data["columns"]["key"])

    # Mask NHDNetwork to isolate the test network -- full-resolution Cape Fear basin, NC
    if "mask_file_path" in network_data:
        data_mask = nhd_io.read_mask(
            network_data["mask_file_path"],
            layer_string=network_data["mask_layer_string"],
        )
        data = data.filter(data_mask.iloc[:, network_data["mask_key"]], axis=0)

    # Sort the index
    data = data.sort_index()

    # Replace downstream references that leave the domain with 0
    data = nhd_io.replace_downstreams(data, network_data["columns"]["downstream"], 0)

    return data, RouteLink, network_data
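# Hypothetical usage sketch (not part of the original script): the
# "Pocono_TEST1" label is an assumption -- any supernetwork name
# registered with nnu.set_supernetwork_data should work here.
#
# data, RouteLink, network_data = get_network_data("Pocono_TEST1")
# print(data.shape, RouteLink.shape)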
def main():
    args = _handle_args()

    nts = 144
    debuglevel = -1 * args.debuglevel
    verbose = args.verbose
    showtiming = args.showtiming
    supernetwork = args.supernetwork
    break_network_at_waterbodies = args.break_network_at_waterbodies
    write_output = args.write_output
    assume_short_ts = args.assume_short_ts

    test_folder = pathlib.Path(root, "test")
    geo_input_folder = test_folder.joinpath("input", "geo")

    # TODO: Make these command-line args
    # NHD subset (Brazos/Lower Colorado):
    # supernetwork = "Brazos_LowerColorado_Named_Streams"
    # supernetwork = "Brazos_LowerColorado_ge5"
    # supernetwork = "Pocono_TEST1"
    # NHD CONUS order 5 and greater:
    # supernetwork = "CONUS_ge5"
    # These are large -- be careful:
    # supernetwork = "Mainstems_CONUS"
    # supernetwork = "CONUS_FULL_RES_v20"
    # supernetwork = "CONUS_Named_Streams"  # create a subset of the full resolution by reading the GNIS field
    # supernetwork = "CONUS_Named_combined"  # process the Named streams through the full-res paths to join the many hanging reaches

    if verbose:
        print("creating supernetwork connections set")
    if showtiming:
        start_time = time.time()

    # STEP 1
    network_data = nnu.set_supernetwork_data(
        supernetwork=args.supernetwork,
        geo_input_folder=geo_input_folder,
        verbose=False,
        debuglevel=debuglevel,
    )

    cols = network_data["columns"]

    data = nhd_io.read(network_data["geo_file_path"])
    data = data[list(cols.values())]
    data = data.set_index(cols["key"])

    if "mask_file_path" in network_data:
        data_mask = nhd_io.read_mask(
            network_data["mask_file_path"],
            layer_string=network_data["mask_layer_string"],
        )
        data = data.filter(data_mask.iloc[:, network_data["mask_key"]], axis=0)

    data = data.sort_index()
    data = nhd_io.replace_downstreams(data, cols["downstream"], 0)

    if args.ql:
        qlats = nhd_io.read_qlat(args.ql)
    else:
        qlats = constant_qlats(data, nts, 10.0)

    connections = nhd_network.extract_connections(data, cols["downstream"])
    wbodies = nhd_network.extract_waterbodies(
        data, cols["waterbody"], network_data["waterbody_null_code"]
    )

    if verbose:
        print("supernetwork connections set complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 2
    if showtiming:
        start_time = time.time()
    if verbose:
        print("organizing connections into reaches ...")

    rconn = nhd_network.reverse_network(connections)
    subnets = nhd_network.reachable_network(rconn)
    subreaches = {}
    for tw, net in subnets.items():
        path_func = partial(nhd_network.split_at_junction, net)
        subreaches[tw] = nhd_network.dfs_decomposition(net, path_func)

    if verbose:
        print("reach organization complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))
    # STEP 3: compute the network, reach by reach
    if showtiming:
        start_time = time.time()

    data["dt"] = 300.0
    data = data.rename(columns=nnu.reverse_dict(cols))
    data = data.astype("float32")
    # datasub = data[["dt", "bw", "tw", "twcc", "dx", "n", "ncc", "cs", "s0"]]

    parallelcompute = False
    if parallelcompute:
        with Parallel(n_jobs=-1, backend="threading") as parallel:
            jobs = []
            for twi, (tw, reach) in enumerate(subreaches.items(), 1):
                r = list(chain.from_iterable(reach))
                data_sub = data.loc[
                    r, ["dt", "bw", "tw", "twcc", "dx", "n", "ncc", "cs", "s0"]
                ].sort_index()
                qlat_sub = qlats.loc[r].sort_index()
                jobs.append(
                    delayed(mc_reach.compute_network)(
                        nts,
                        reach,
                        subnets[tw],
                        data_sub.index.values,
                        data_sub.columns.values,
                        data_sub.values,
                        qlat_sub.values,
                    )
                )
            results = parallel(jobs)
    else:
        results = []
        for twi, (tw, reach) in enumerate(subreaches.items(), 1):
            r = list(chain.from_iterable(reach))
            data_sub = data.loc[
                r, ["dt", "bw", "tw", "twcc", "dx", "n", "ncc", "cs", "s0"]
            ].sort_index()
            qlat_sub = qlats.loc[r].sort_index()
            results.append(
                mc_reach.compute_network(
                    nts,
                    reach,
                    subnets[tw],
                    data_sub.index.values,
                    data_sub.columns.values,
                    data_sub.values,
                    qlat_sub.values,
                )
            )

    fdv_columns = pd.MultiIndex.from_product(
        [range(nts), ["q", "v", "d"]]
    ).to_flat_index()
    flowveldepth = pd.concat(
        [pd.DataFrame(d, index=i, columns=fdv_columns) for i, d in results],
        copy=False,
    )
    flowveldepth = flowveldepth.sort_index()
    flowveldepth.to_csv(f"{args.supernetwork}.csv")
    print(flowveldepth)

    if verbose:
        print("ordered reach computation complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))
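# The qlat fallback in main() above calls constant_qlats(), which is not
# defined in this section. A minimal sketch consistent with how it is
# called (one row per segment, one column per timestep, a single constant
# lateral-inflow value) might look like this -- an assumption, not the
# original implementation:
def constant_qlats(index_dataset, nts, qlat_const):
    # Build an (n_segments x nts) table filled with qlat_const,
    # indexed identically to the routing DataFrame.
    return pd.DataFrame(
        qlat_const,
        index=index_dataset.index,
        columns=range(nts),
        dtype="float32",
    )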
def main():
    args = _handle_args()

    # The following two values are currently hard-coded for this test domain
    nts = 720  # number of timesteps; 720 * 300 s model timestep = 216,000 s (2.5 days)
    dt_mc = 300.0  # time interval for MC

    # Currently tested on the Sugar Creek domain
    ngen_network_df = nhd_io.read_geopandas(
        os.path.join(next_gen_input_folder, args.supernetwork)
    )

    # Create a dictionary mapping each connection ID to its downstream ID
    ngen_network_dict = dict(zip(ngen_network_df.ID, ngen_network_df.toID))

    def node_key_func(x):
        return int(x[4:])

    # Extract the integer portion of each ID
    waterbody_connections = {
        node_key_func(k): node_key_func(v) for k, v in ngen_network_dict.items()
    }

    # Convert the connection dictionary to a DataFrame and make the ID column the index
    waterbody_df = pd.DataFrame.from_dict(
        waterbody_connections, orient="index", columns=["to"]
    )
    # Sort the ID index
    waterbody_df = waterbody_df.sort_index()

    waterbody_df = nhd_io.replace_downstreams(waterbody_df, "to", 0)

    connections = nhd_network.extract_connections(waterbody_df, "to")

    # Read and convert catchment lateral flows to a format that compute_network can process
    qlats = next_gen_io.read_catchment_lateral_flows(next_gen_input_folder)

    rconn = nhd_network.reverse_network(connections)
    subnets = nhd_network.reachable_network(rconn, check_disjoint=False)

    waterbody_df["dt"] = dt_mc
    # Setting all below to 1.0 until we can get the appropriate parameters
    waterbody_df["bw"] = 1.0
    waterbody_df["tw"] = 1.0
    waterbody_df["twcc"] = 1.0
    waterbody_df["dx"] = 1.0
    waterbody_df["n"] = 1.0
    waterbody_df["ncc"] = 1.0
    waterbody_df["cs"] = 1.0
    waterbody_df["s0"] = 1.0

    # Set types as float32
    waterbody_df = waterbody_df.astype(
        {
            "dt": "float32",
            "bw": "float32",
            "tw": "float32",
            "twcc": "float32",
            "dx": "float32",
            "n": "float32",
            "ncc": "float32",
            "cs": "float32",
            "s0": "float32",
        }
    )

    subreaches = {}
    for tw, net in subnets.items():
        path_func = partial(nhd_network.split_at_junction, net)
        subreaches[tw] = nhd_network.dfs_decomposition(net, path_func)

    results = []
    for twi, (tw, reach) in enumerate(subreaches.items(), 1):
        r = list(chain.from_iterable(reach))
        data_sub = waterbody_df.loc[
            r, ["dt", "bw", "tw", "twcc", "dx", "n", "ncc", "cs", "s0"]
        ].sort_index()
        qlat_sub = qlats.loc[r].sort_index()
        results.append(
            mc_reach.compute_network(
                nts,
                reach,
                subnets[tw],
                data_sub.index.values,
                data_sub.columns.values,
                data_sub.values,
                qlat_sub.values,
            )
        )

    fdv_columns = pd.MultiIndex.from_product(
        [range(nts), ["q", "v", "d"]]
    ).to_flat_index()
    flowveldepth = pd.concat(
        [pd.DataFrame(d, index=i, columns=fdv_columns) for i, d in results],
        copy=False,
    )
    flowveldepth = flowveldepth.sort_index()

    outfile_base_name = args.supernetwork.split(".")[0]
    flowveldepth.to_csv(f"{outfile_base_name}_mc_results.csv")
    print(flowveldepth)
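# Hypothetical helper (an assumption, not part of the source): the
# to_flat_index() calls above yield tuple column labels like (0, "q"),
# (0, "v"), (0, "d"). This shows one way to slice a single variable back
# out of the flattened results frame produced by either driver.
def extract_variable(flowveldepth, variable):
    # Keep only the columns whose (timestep, variable) label matches.
    cols = [c for c in flowveldepth.columns if c[1] == variable]
    return flowveldepth[cols]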