def nwm_network_preprocess(
    supernetwork_parameters,
    waterbody_parameters,
    showtiming=False,
    verbose=False,
    debuglevel=0,
):
    if verbose:
        print("creating supernetwork connections set")
    if showtiming:
        start_time = time.time()

    # STEP 1: Build basic network connections graph,
    # read network parameters, identify waterbodies and gages, if any.
    connections, param_df, wbodies, gages = nnu.build_connections(
        supernetwork_parameters,
    )

    break_network_at_waterbodies = waterbody_parameters.get(
        "break_network_at_waterbodies", False)
    break_network_at_gages = supernetwork_parameters.get(
        "break_network_at_gages", False)

    # Turn off any further reservoir processing if the network contains no waterbodies
    if not wbodies:
        break_network_at_waterbodies = False

    if break_network_at_waterbodies:
        connections = nhd_network.replace_waterbodies_connections(
            connections, wbodies)

    if verbose:
        print("supernetwork connections set complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    ################################
    ## STEP 3a: Read waterbody parameter file
    # waterbodies_values = supernetwork_values[12]
    # waterbodies_segments = supernetwork_values[13]
    # connections_tailwaters = supernetwork_values[4]

    if break_network_at_waterbodies:
        # Read waterbody parameters
        waterbodies_df = nhd_io.read_waterbody_df(
            waterbody_parameters, {"level_pool": wbodies.values()})

        # Remove duplicate lake_ids and rows
        waterbodies_df = (waterbodies_df.reset_index().drop_duplicates(
            subset="lake_id").set_index("lake_id"))

        # Declare empty dataframe
        waterbody_types_df = pd.DataFrame()

        # Check if hybrid-usgs, hybrid-usace, or rfc type reservoirs are set to true
        wbtype = "hybrid_and_rfc"
        wb_params_hybrid_and_rfc = waterbody_parameters.get(
            wbtype, defaultdict(list))  # TODO: Convert these to `get` statements

        wbtype = "level_pool"
        wb_params_level_pool = waterbody_parameters.get(
            wbtype, defaultdict(list))  # TODO: Convert these to `get` statements

        waterbody_type_specified = False

        # NOTE: What are we accomplishing with this logic here?
        if (wb_params_hybrid_and_rfc["reservoir_persistence_usgs"]
                or wb_params_hybrid_and_rfc["reservoir_persistence_usace"]
                or wb_params_hybrid_and_rfc["reservoir_rfc_forecasts"]):
            waterbody_type_specified = True
            waterbody_types_df = nhd_io.read_reservoir_parameter_file(
                wb_params_hybrid_and_rfc["reservoir_parameter_file"],
                wb_params_level_pool["level_pool_waterbody_id"],
                wbodies.values(),
            )
            # Remove duplicate lake_ids and rows
            waterbody_types_df = (
                waterbody_types_df.reset_index().drop_duplicates(
                    subset="lake_id").set_index("lake_id"))
    else:
        # Declare empty dataframes and leave reservoir types unspecified
        waterbody_types_df = pd.DataFrame()
        waterbodies_df = pd.DataFrame()
        waterbody_type_specified = False

    # STEP 2: Identify Independent Networks and Reaches by Network
    if showtiming:
        start_time = time.time()
    if verbose:
        print("organizing connections into reaches ...")

    network_break_segments = set()
    if break_network_at_waterbodies:
        network_break_segments = network_break_segments.union(wbodies.values())
    if break_network_at_gages:
        network_break_segments = network_break_segments.union(gages.keys())

    independent_networks, reaches_bytw, rconn = nnu.organize_independent_networks(
        connections,
        network_break_segments,
    )

    if verbose:
        print("reach organization complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    return (
        connections,
        param_df,
        wbodies,
        waterbodies_df,
        waterbody_types_df,
        break_network_at_waterbodies,  # Could this be inferred from the wbodies or waterbodies_df? Consider making this name less about the network and more about the reservoir simulation.
        waterbody_type_specified,  # Seems like this could be inferred from waterbody_types_df...
        independent_networks,
        reaches_bytw,
        rconn,
    )
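# A minimal sketch, responding to the inline questions in the return statement
# above: assuming the dataframes produced by nwm_network_preprocess(), the two
# reservoir-simulation flags could be derived from the dataframes rather than
# returned separately. The helper name is hypothetical and is not wired into
# the current interface.
def _infer_reservoir_flags(waterbodies_df, waterbody_types_df):
    # An empty waterbodies_df means no reservoirs were found or requested;
    # an empty waterbody_types_df means only level-pool reservoirs apply.
    break_network_at_waterbodies = not waterbodies_df.empty
    waterbody_type_specified = not waterbody_types_df.empty
    return break_network_at_waterbodies, waterbody_type_specified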
def main_v02(argv):
    args = _handle_args_v02(argv)
    (
        supernetwork_parameters,
        waterbody_parameters,
        forcing_parameters,
        restart_parameters,
        output_parameters,
        run_parameters,
        parity_parameters,
        data_assimilation_parameters,
        diffusive_parameters,
        coastal_parameters,
    ) = _input_handler_v02(args)

    dt = run_parameters.get("dt", None)
    nts = run_parameters.get("nts", None)
    verbose = run_parameters.get("verbose", None)
    showtiming = run_parameters.get("showtiming", None)
    debuglevel = run_parameters.get("debuglevel", 0)
    break_network_at_waterbodies = run_parameters.get(
        "break_network_at_waterbodies", False)
    break_network_at_gages = supernetwork_parameters.get(
        "break_network_at_gages", False)

    if showtiming:
        main_start_time = time.time()

    if verbose:
        print("creating supernetwork connections set")
    if showtiming:
        start_time = time.time()

    # STEP 1: Build basic network connections graph
    connections, param_df, wbody_conn, gages = nnu.build_connections(
        supernetwork_parameters)

    if break_network_at_waterbodies:
        connections = nhd_network.replace_waterbodies_connections(
            connections, wbody_conn)

    if verbose:
        print("supernetwork connections set complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    ################################
    ## STEP 3a: Read waterbody parameter file
    # waterbodies_values = supernetwork_values[12]
    # waterbodies_segments = supernetwork_values[13]
    # connections_tailwaters = supernetwork_values[4]

    waterbody_type_specified = False

    if break_network_at_waterbodies:
        # Read waterbody parameters
        waterbodies_df = nhd_io.read_waterbody_df(
            waterbody_parameters, {"level_pool": wbody_conn.values()})

        # Remove duplicate lake_ids and rows
        waterbodies_df = (waterbodies_df.reset_index().drop_duplicates(
            subset="lake_id").set_index("lake_id"))

        # Declare empty dataframe
        waterbody_types_df = pd.DataFrame()

        # Check if hybrid-usgs, hybrid-usace, or rfc type reservoirs are set to true
        wbtype = "hybrid_and_rfc"
        wb_params_hybrid_and_rfc = waterbody_parameters.get(
            wbtype, defaultdict(list))  # TODO: Convert these to `get` statements

        wbtype = "level_pool"
        wb_params_level_pool = waterbody_parameters.get(
            wbtype, defaultdict(list))  # TODO: Convert these to `get` statements

        waterbody_type_specified = False

        if (wb_params_hybrid_and_rfc["reservoir_persistence_usgs"]
                or wb_params_hybrid_and_rfc["reservoir_persistence_usace"]
                or wb_params_hybrid_and_rfc["reservoir_rfc_forecasts"]):
            waterbody_type_specified = True
            waterbody_types_df = nhd_io.read_reservoir_parameter_file(
                wb_params_hybrid_and_rfc["reservoir_parameter_file"],
                wb_params_level_pool["level_pool_waterbody_id"],
                wbody_conn.values(),
            )
            # Remove duplicate lake_ids and rows
            waterbody_types_df = (
                waterbody_types_df.reset_index().drop_duplicates(
                    subset="lake_id").set_index("lake_id"))
    else:
        # Declare empty dataframes
        waterbody_types_df = pd.DataFrame()
        waterbodies_df = pd.DataFrame()

    # STEP 2: Identify Independent Networks and Reaches by Network
    if showtiming:
        start_time = time.time()
    if verbose:
        print("organizing connections into reaches ...")

    network_break_segments = set()
    if break_network_at_waterbodies:
        network_break_segments = network_break_segments.union(wbody_conn.values())
    if break_network_at_gages:
        network_break_segments = network_break_segments.union(gages.keys())

    independent_networks, reaches_bytw, rconn = nnu.organize_independent_networks(
        connections,
        network_break_segments,
    )

    if verbose:
        print("reach organization complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    if break_network_at_waterbodies:
        ## STEP 3c: Handle Waterbody Initial States
        # TODO: move step 3c into function in nnu, like other functions wrapped in main()
        if showtiming:
            start_time = time.time()
        if verbose:
            print("setting waterbody initial states ...")

        if restart_parameters.get("wrf_hydro_waterbody_restart_file", None):
            waterbodies_initial_states_df = nhd_io.get_reservoir_restart_from_wrf_hydro(
                restart_parameters["wrf_hydro_waterbody_restart_file"],
                restart_parameters["wrf_hydro_waterbody_ID_crosswalk_file"],
                restart_parameters[
                    "wrf_hydro_waterbody_ID_crosswalk_file_field_name"],
                restart_parameters["wrf_hydro_waterbody_crosswalk_filter_file"],
                restart_parameters[
                    "wrf_hydro_waterbody_crosswalk_filter_file_field_name"],
            )
        else:
            # TODO: Consider adding option to read cold state from route-link file
            waterbodies_initial_ds_flow_const = 0.0
            waterbodies_initial_depth_const = -1.0
            # Set initial states from cold-state
            waterbodies_initial_states_df = pd.DataFrame(
                0,
                index=waterbodies_df.index,
                columns=["qd0", "h0"],
                dtype="float32",
            )
            # TODO: This assignment could probably be done in the above call
            waterbodies_initial_states_df["qd0"] = waterbodies_initial_ds_flow_const
            waterbodies_initial_states_df["h0"] = waterbodies_initial_depth_const
            waterbodies_initial_states_df["index"] = range(
                len(waterbodies_initial_states_df))

        waterbodies_df = pd.merge(
            waterbodies_df, waterbodies_initial_states_df, on="lake_id")

        if verbose:
            print("waterbody initial states complete")
        if showtiming:
            print("... in %s seconds." % (time.time() - start_time))
            start_time = time.time()

    # STEP 4: Handle Channel Initial States
    if showtiming:
        start_time = time.time()
    if verbose:
        print("setting channel initial states ...")

    q0 = nnu.build_channel_initial_state(restart_parameters, param_df.index)

    if verbose:
        print("channel initial states complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))
        start_time = time.time()

    # STEP 5: Read (or set) QLateral Inputs
    if showtiming:
        start_time = time.time()
    if verbose:
        print("creating qlateral array ...")

    forcing_parameters["qts_subdivisions"] = run_parameters["qts_subdivisions"]
    forcing_parameters["nts"] = run_parameters["nts"]
    qlats = nnu.build_qlateral_array(
        forcing_parameters,
        param_df.index,
        nts,
        run_parameters.get("qts_subdivisions", 1),
    )

    if verbose:
        print("qlateral array complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 6
    data_assimilation_csv = data_assimilation_parameters.get(
        "data_assimilation_csv", None)
    data_assimilation_folder = data_assimilation_parameters.get(
        "data_assimilation_timeslices_folder", None)
    last_obs_file = data_assimilation_parameters.get(
        "wrf_hydro_last_obs_file", None)

    if data_assimilation_csv or data_assimilation_folder or last_obs_file:
        if showtiming:
            start_time = time.time()
        if verbose:
            print("creating usgs time_slice data array ...")

        usgs_df, lastobs_df, da_parameter_dict = nnu.build_data_assimilation(
            data_assimilation_parameters)

        if verbose:
            print("usgs array complete")
        if showtiming:
            print("... in %s seconds." % (time.time() - start_time))
    else:
        usgs_df = pd.DataFrame()
        lastobs_df = pd.DataFrame()
        da_parameter_dict = {}

    ################### Main Execution Loop across ordered networks
    if showtiming:
        start_time = time.time()
    if verbose:
        if run_parameters.get("return_courant", False):
            print(
                "executing routing computation, with Courant evaluation metrics returned"
            )
        else:
            print("executing routing computation ...")

    # TODO: align compute_kernel and compute_method in run_parameters
    if run_parameters.get("compute_kernel", None):
        compute_func = run_parameters.get("compute_kernel", None)
    else:
        compute_func = run_parameters.get("compute_method", None)
    # TODO: Remove below. --compute-method=V02-structured-obj did not work on command line
    # compute_func = fast_reach.compute_network_structured_obj

    results = compute_nhd_routing_v02(
        connections,
        rconn,
        wbody_conn,
        reaches_bytw,
        compute_func,
        run_parameters.get("parallel_compute_method", None),
        run_parameters.get("subnetwork_target_size", 1),
        # The default here might be the whole network or some percentage...
        run_parameters.get("cpu_pool", None),
        run_parameters.get("dt"),
        run_parameters.get("nts", 1),
        run_parameters.get("qts_subdivisions", 1),
        independent_networks,
        param_df,
        q0,
        qlats,
        usgs_df,
        lastobs_df,
        da_parameter_dict,
        run_parameters.get("assume_short_ts", False),
        run_parameters.get("return_courant", False),
        waterbodies_df,
        waterbody_parameters,  # TODO: Can we remove the dependence on this input? It's like passing argv down into the compute kernel -- seems like we can strip out the specifically needed items.
        waterbody_types_df,
        waterbody_type_specified,
        diffusive_parameters,
    )

    if verbose:
        print("ordered reach computation complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    ################### Output Handling
    if showtiming:
        start_time = time.time()
    if verbose:
        print("Handling output ...")

    # Default to no csv output so the checks below do not hit undefined names
    csv_output_folder = None
    csv_output_segments = None
    csv_output = output_parameters.get("csv_output", None)
    if csv_output:
        csv_output_folder = output_parameters["csv_output"].get(
            "csv_output_folder", None)
        csv_output_segments = csv_output.get("csv_output_segments", None)

    if (debuglevel <= -1) or csv_output:

        qvd_columns = pd.MultiIndex.from_product(
            [range(nts), ["q", "v", "d"]]).to_flat_index()

        flowveldepth = pd.concat(
            [
                pd.DataFrame(r[1], index=r[0], columns=qvd_columns)
                for r in results
            ],
            copy=False,
        )

        if run_parameters.get("return_courant", False):
            courant_columns = pd.MultiIndex.from_product(
                [range(nts), ["cn", "ck", "X"]]).to_flat_index()
            courant = pd.concat(
                [
                    pd.DataFrame(r[2], index=r[0], columns=courant_columns)
                    for r in results
                ],
                copy=False,
            )

        if csv_output_folder:
            # create filenames
            # TODO: create more descriptive filenames
            if supernetwork_parameters.get("title_string", None):
                filename_fvd = ("flowveldepth_" +
                                supernetwork_parameters["title_string"] + ".csv")
                filename_courant = ("courant_" +
                                    supernetwork_parameters["title_string"] + ".csv")
            else:
                run_time_stamp = datetime.now().isoformat()
                filename_fvd = "flowveldepth_" + run_time_stamp + ".csv"
                filename_courant = "courant_" + run_time_stamp + ".csv"

            output_path = Path(csv_output_folder).resolve()

            flowveldepth = flowveldepth.sort_index()
            flowveldepth.to_csv(output_path.joinpath(filename_fvd))

            if run_parameters.get("return_courant", False):
                courant = courant.sort_index()
                courant.to_csv(output_path.joinpath(filename_courant))

            usgs_df_filtered = usgs_df[usgs_df.index.isin(csv_output_segments)]
            usgs_df_filtered.to_csv(output_path.joinpath("usgs_df.csv"))

        if debuglevel <= -1:
            print(flowveldepth)

    # directory containing WRF Hydro restart files
    wrf_hydro_restart_dir = output_parameters.get(
        "wrf_hydro_channel_restart_directory", None)
    if wrf_hydro_restart_dir:

        wrf_hydro_channel_restart_new_extension = output_parameters.get(
            "wrf_hydro_channel_restart_new_extension", "TRTE")

        # list of WRF Hydro restart files
        wrf_hydro_restart_files = sorted(
            Path(wrf_hydro_restart_dir).glob(
                output_parameters["wrf_hydro_channel_restart_pattern_filter"]
                + "[!" + wrf_hydro_channel_restart_new_extension + "]"))

        if len(wrf_hydro_restart_files) > 0:
            qvd_columns = pd.MultiIndex.from_product(
                [range(nts), ["q", "v", "d"]]).to_flat_index()

            flowveldepth = pd.concat(
                [
                    pd.DataFrame(r[1], index=r[0], columns=qvd_columns)
                    for r in results
                ],
                copy=False,
            )

            nhd_io.write_channel_restart_to_wrf_hydro(
                flowveldepth,
                wrf_hydro_restart_files,
                restart_parameters.get("wrf_hydro_channel_restart_file"),
                run_parameters.get("dt"),
                run_parameters.get("nts"),
                restart_parameters.get("wrf_hydro_channel_ID_crosswalk_file"),
                restart_parameters.get(
                    "wrf_hydro_channel_ID_crosswalk_file_field_name"),
                wrf_hydro_channel_restart_new_extension,
            )
        else:
            # print error and raise exception
            error_msg = "WRF Hydro restart files not found - Aborting restart write sequence"
            raise AssertionError(error_msg)

    chrtout_folder = output_parameters.get(
        "wrf_hydro_channel_output_folder", None)
    if chrtout_folder:
        qvd_columns = pd.MultiIndex.from_product(
            [range(nts), ["q", "v", "d"]]).to_flat_index()

        flowveldepth = pd.concat(
            [
                pd.DataFrame(r[1], index=r[0], columns=qvd_columns)
                for r in results
            ],
            copy=False,
        )
        wrf_hydro_channel_output_new_extension = output_parameters.get(
            "wrf_hydro_channel_output_new_extension", "TRTE")
        chrtout_files = sorted(
            Path(chrtout_folder).glob(
                output_parameters["wrf_hydro_channel_output_file_pattern_filter"]))
        nhd_io.write_q_to_wrf_hydro(flowveldepth, chrtout_files,
                                    run_parameters["qts_subdivisions"])

    if verbose:
        print("output complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    ################### Parity Check
    if ("parity_check_input_folder" in parity_parameters
            or "parity_check_file" in parity_parameters
            or "parity_check_waterbody_file" in parity_parameters):

        if verbose:
            print(
                "conducting parity check, comparing WRF Hydro results against t-route results"
            )
        if showtiming:
            start_time = time.time()

        parity_parameters["nts"] = nts
        parity_parameters["dt"] = dt

        build_tests.parity_check(
            parity_parameters,
            results,
        )

        if verbose:
            print("parity check complete")
        if showtiming:
            print("... in %s seconds." % (time.time() - start_time))

    if verbose:
        print("process complete")
    if showtiming:
        print("%s seconds." % (time.time() - main_start_time))
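# A hedged sketch of delegating STEPs 1, 2, and 3a of main_v02 above to
# nwm_network_preprocess(), rather than repeating that logic inline. The helper
# name is hypothetical; the tuple order matches the return statement of
# nwm_network_preprocess() earlier in this file, and nothing else is assumed.
def _network_preprocess_for_v02(supernetwork_parameters, waterbody_parameters,
                                showtiming=False, verbose=False, debuglevel=0):
    (
        connections,
        param_df,
        wbody_conn,
        waterbodies_df,
        waterbody_types_df,
        break_network_at_waterbodies,
        waterbody_type_specified,
        independent_networks,
        reaches_bytw,
        rconn,
    ) = nwm_network_preprocess(
        supernetwork_parameters,
        waterbody_parameters,
        showtiming=showtiming,
        verbose=verbose,
        debuglevel=debuglevel,
    )
    # main_v02 would continue from here with waterbody and channel initial
    # states (STEPs 3c and 4) using these same names.
    return (connections, param_df, wbody_conn, waterbodies_df,
            waterbody_types_df, break_network_at_waterbodies,
            waterbody_type_specified, independent_networks, reaches_bytw, rconn)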
def main_v02(argv):
    args = _handle_args_v02(argv)
    (
        supernetwork_parameters,
        waterbody_parameters,
        forcing_parameters,
        restart_parameters,
        output_parameters,
        run_parameters,
        parity_parameters,
        data_assimilation_parameters,
        diffusive_parameters,
        coastal_parameters,
    ) = _input_handler_v02(args)

    dt = run_parameters.get("dt", None)
    nts = run_parameters.get("nts", None)
    verbose = run_parameters.get("verbose", None)
    showtiming = run_parameters.get("showtiming", None)
    debuglevel = run_parameters.get("debuglevel", 0)
    break_network_at_waterbodies = run_parameters.get(
        "break_network_at_waterbodies", False)

    if showtiming:
        main_start_time = time.time()

    if verbose:
        print("creating supernetwork connections set")
    if showtiming:
        start_time = time.time()

    # STEP 1: Build basic network connections graph
    connections, param_df, wbodies, gages = nnu.build_connections(
        supernetwork_parameters)

    if break_network_at_waterbodies:
        connections = nhd_network.replace_waterbodies_connections(
            connections, wbodies)

    if verbose:
        print("supernetwork connections set complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    ################################
    ## STEP 3a: Read waterbody parameter file
    # waterbodies_values = supernetwork_values[12]
    # waterbodies_segments = supernetwork_values[13]
    # connections_tailwaters = supernetwork_values[4]

    if break_network_at_waterbodies:
        # Read waterbody parameters
        waterbodies_df = nhd_io.read_waterbody_df(
            waterbody_parameters, {"level_pool": wbodies.values()})

        # Remove duplicate lake_ids and rows
        waterbodies_df_reduced = (waterbodies_df.reset_index().drop_duplicates(
            subset="lake_id").set_index("lake_id"))
    else:
        waterbodies_df_reduced = pd.DataFrame()

    # STEP 2: Identify Independent Networks and Reaches by Network
    if showtiming:
        start_time = time.time()
    if verbose:
        print("organizing connections into reaches ...")

    independent_networks, reaches_bytw, rconn = nnu.organize_independent_networks(
        connections,
        list(waterbodies_df_reduced.index.values)
        if break_network_at_waterbodies else None,
    )

    if verbose:
        print("reach organization complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    if break_network_at_waterbodies:
        ## STEP 3c: Handle Waterbody Initial States
        # TODO: move step 3c into function in nnu, like other functions wrapped in main()
        if showtiming:
            start_time = time.time()
        if verbose:
            print("setting waterbody initial states ...")

        if restart_parameters.get("wrf_hydro_waterbody_restart_file", None):
            waterbodies_initial_states_df = nhd_io.get_reservoir_restart_from_wrf_hydro(
                restart_parameters["wrf_hydro_waterbody_restart_file"],
                restart_parameters["wrf_hydro_waterbody_ID_crosswalk_file"],
                restart_parameters[
                    "wrf_hydro_waterbody_ID_crosswalk_file_field_name"],
                restart_parameters["wrf_hydro_waterbody_crosswalk_filter_file"],
                restart_parameters[
                    "wrf_hydro_waterbody_crosswalk_filter_file_field_name"],
            )
        else:
            # TODO: Consider adding option to read cold state from route-link file
            waterbodies_initial_ds_flow_const = 0.0
            waterbodies_initial_depth_const = -1.0
            # Set initial states from cold-state
            waterbodies_initial_states_df = pd.DataFrame(
                0,
                index=waterbodies_df.index,
                columns=["qd0", "h0"],
                dtype="float32",
            )
            # TODO: This assignment could probably be done in the above call
            waterbodies_initial_states_df["qd0"] = waterbodies_initial_ds_flow_const
            waterbodies_initial_states_df["h0"] = waterbodies_initial_depth_const
            waterbodies_initial_states_df["index"] = range(
                len(waterbodies_initial_states_df))

        waterbodies_df_reduced = pd.merge(
            waterbodies_df_reduced, waterbodies_initial_states_df, on="lake_id")

        if verbose:
            print("waterbody initial states complete")
        if showtiming:
            print("... in %s seconds." % (time.time() - start_time))
            start_time = time.time()

    # STEP 4: Handle Channel Initial States
    if showtiming:
        start_time = time.time()
    if verbose:
        print("setting channel initial states ...")

    q0 = nnu.build_channel_initial_state(restart_parameters, param_df.index)

    if verbose:
        print("channel initial states complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))
        start_time = time.time()

    # STEP 5: Read (or set) QLateral Inputs
    if showtiming:
        start_time = time.time()
    if verbose:
        print("creating qlateral array ...")

    forcing_parameters["qts_subdivisions"] = run_parameters["qts_subdivisions"]
    forcing_parameters["nts"] = run_parameters["nts"]
    qlats = nnu.build_qlateral_array(
        forcing_parameters,
        param_df.index,
        nts,
        run_parameters.get("qts_subdivisions", 1),
    )

    if verbose:
        print("qlateral array complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 6
    data_assimilation_csv = data_assimilation_parameters.get(
        "data_assimilation_csv", None)
    data_assimilation_filter = data_assimilation_parameters.get(
        "data_assimilation_filter", None)

    if data_assimilation_csv or data_assimilation_filter:
        if showtiming:
            start_time = time.time()
        if verbose:
            print("creating usgs time_slice data array ...")

        usgs_df = nnu.build_data_assimilation(data_assimilation_parameters)

        if verbose:
            print("usgs array complete")
        if showtiming:
            print("... in %s seconds." % (time.time() - start_time))
    else:
        usgs_df = pd.DataFrame()

    last_obs_file = data_assimilation_parameters.get(
        "wrf_hydro_last_obs_file", None)
    last_obs_df = pd.DataFrame()

    ################### Main Execution Loop across ordered networks
    if showtiming:
        start_time = time.time()
    if verbose:
        if run_parameters.get("return_courant", False):
            print(
                "executing routing computation, with Courant evaluation metrics returned"
            )
        else:
            print("executing routing computation ...")

    # TODO: align compute_kernel and compute_method in run_parameters
    if run_parameters.get("compute_kernel", None):
        compute_func = run_parameters.get("compute_kernel", None)
    else:
        compute_func = run_parameters.get("compute_method", None)
    # TODO: Remove below. --compute-method=V02-structured-obj did not work on command line
    # compute_func = fast_reach.compute_network_structured_obj

    results = compute_nhd_routing_v02(
        connections,
        rconn,
        wbodies,
        reaches_bytw,
        compute_func,
        run_parameters.get("parallel_compute_method", None),
        run_parameters.get("subnetwork_target_size", 1),
        # The default here might be the whole network or some percentage...
        run_parameters.get("cpu_pool", None),
        run_parameters.get("dt"),
        run_parameters.get("nts", 1),
        run_parameters.get("qts_subdivisions", 1),
        independent_networks,
        param_df,
        q0,
        qlats,
        usgs_df,
        last_obs_df,
        run_parameters.get("assume_short_ts", False),
        run_parameters.get("return_courant", False),
        waterbodies_df_reduced,
        diffusive_parameters,
    )

    if verbose:
        print("ordered reach computation complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    ################### Output Handling
    if showtiming:
        start_time = time.time()
    if verbose:
        print("Handling output ...")

    # Default to no csv output so the checks below do not hit undefined names
    csv_output = None
    csv_output_folder = None
    csv_output_segments = None
    if output_parameters:
        csv_output = output_parameters.get("csv_output", None)
        if csv_output:
            csv_output_folder = output_parameters["csv_output"].get(
                "csv_output_folder", None)
            csv_output_segments = csv_output.get("csv_output_segments", None)

    if (debuglevel <= -1) or csv_output:

        qvd_columns = pd.MultiIndex.from_product(
            [range(nts), ["q", "v", "d"]]).to_flat_index()

        if run_parameters.get("return_courant", False):
            flowveldepth = pd.concat(
                [
                    pd.DataFrame(d, index=i, columns=qvd_columns)
                    for i, d, c in results
                ],
                copy=False,
            )
        else:
            flowveldepth = pd.concat(
                [
                    pd.DataFrame(d, index=i, columns=qvd_columns)
                    for i, d in results
                ],
                copy=False,
            )

        if run_parameters.get("return_courant", False):
            courant_columns = pd.MultiIndex.from_product(
                [range(nts), ["cn", "ck", "X"]]).to_flat_index()
            courant = pd.concat(
                [
                    pd.DataFrame(c, index=i, columns=courant_columns)
                    for i, d, c in results
                ],
                copy=False,
            )

        if csv_output_folder:
            # create filenames
            # TODO: create more descriptive filenames
            if supernetwork_parameters.get("title_string", None):
                filename_fvd = ("flowveldepth_" +
                                supernetwork_parameters["title_string"] + ".csv")
                filename_courant = ("courant_" +
                                    supernetwork_parameters["title_string"] + ".csv")
            else:
                run_time_stamp = datetime.now().isoformat()
                filename_fvd = "flowveldepth_" + run_time_stamp + ".csv"
                filename_courant = "courant_" + run_time_stamp + ".csv"

            output_path = pathlib.Path(csv_output_folder).resolve()

            flowveldepth = flowveldepth.sort_index()
            flowveldepth.to_csv(output_path.joinpath(filename_fvd))

            if run_parameters.get("return_courant", False):
                courant = courant.sort_index()
                courant.to_csv(output_path.joinpath(filename_courant))

            usgs_df_filtered = usgs_df[usgs_df.index.isin(csv_output_segments)]
            usgs_df_filtered.to_csv(output_path.joinpath("usgs_df.csv"))

        if debuglevel <= -1:
            print(flowveldepth)

    if verbose:
        print("output complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    ################### Parity Check
    if ("parity_check_input_folder" in parity_parameters
            or "parity_check_file" in parity_parameters
            or "parity_check_waterbody_file" in parity_parameters):

        if verbose:
            print(
                "conducting parity check, comparing WRF Hydro results against t-route results"
            )
        if showtiming:
            start_time = time.time()

        parity_parameters["nts"] = nts
        parity_parameters["dt"] = dt

        build_tests.parity_check(
            parity_parameters,
            results,
        )

        if verbose:
            print("parity check complete")
        if showtiming:
            print("... in %s seconds." % (time.time() - start_time))

    if verbose:
        print("process complete")
    if showtiming:
        print("%s seconds." % (time.time() - main_start_time))
def nwm_network_preprocess(
    supernetwork_parameters,
    waterbody_parameters,
    showtiming=False,
    verbose=False,
    debuglevel=0,
):
    if verbose:
        print("creating supernetwork connections set")
    if showtiming:
        start_time = time.time()

    # STEP 1: Build basic network connections graph,
    # read network parameters, identify waterbodies and gages, if any.
    connections, param_df, wbodies, gages = nnu.build_connections(
        supernetwork_parameters,
    )

    break_network_at_waterbodies = waterbody_parameters.get(
        "break_network_at_waterbodies", False)
    break_network_at_gages = supernetwork_parameters.get(
        "break_network_at_gages", False)

    # Turn off any further reservoir processing if the network contains no waterbodies
    if not wbodies:
        break_network_at_waterbodies = False

    if break_network_at_waterbodies:
        connections = nhd_network.replace_waterbodies_connections(
            connections, wbodies)

    if verbose:
        print("supernetwork connections set complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    ################################
    ## STEP 3a: Read waterbody parameter file
    # waterbodies_values = supernetwork_values[12]
    # waterbodies_segments = supernetwork_values[13]
    # connections_tailwaters = supernetwork_values[4]

    if break_network_at_waterbodies:
        # Read waterbody parameters
        waterbodies_df = nhd_io.read_waterbody_df(
            waterbody_parameters, {"level_pool": wbodies.values()})

        # Remove duplicate lake_ids and rows
        waterbodies_df = (waterbodies_df.reset_index().drop_duplicates(
            subset="lake_id").set_index("lake_id"))
    else:
        waterbodies_df = pd.DataFrame()

    # STEP 2: Identify Independent Networks and Reaches by Network
    if showtiming:
        start_time = time.time()
    if verbose:
        print("organizing connections into reaches ...")

    network_break_segments = set()
    if break_network_at_waterbodies:
        network_break_segments = network_break_segments.union(wbodies.values())
    if break_network_at_gages:
        network_break_segments = network_break_segments.union(gages.keys())

    independent_networks, reaches_bytw, rconn = nnu.organize_independent_networks(
        connections,
        network_break_segments,
    )

    if verbose:
        print("reach organization complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    return (
        connections,
        param_df,
        wbodies,
        waterbodies_df,
        break_network_at_waterbodies,
        independent_networks,
        reaches_bytw,
        rconn,
    )
def main():
    (
        supernetwork_parameters,
        waterbody_parameters,
        forcing_parameters,
        restart_parameters,
        output_parameters,
        run_parameters,
        parity_parameters,
    ) = _input_handler()

    dt = run_parameters.get("dt", None)
    nts = run_parameters.get("nts", None)
    verbose = run_parameters.get("verbose", None)
    showtiming = run_parameters.get("showtiming", None)
    debuglevel = run_parameters.get("debuglevel", 0)

    if verbose:
        print("creating supernetwork connections set")
    if showtiming:
        start_time = time.time()

    # STEP 1: Build basic network connections graph
    connections, wbodies, param_df = nnu.build_connections(
        supernetwork_parameters, dt)

    if verbose:
        print("supernetwork connections set complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 2: Identify Independent Networks and Reaches by Network
    if showtiming:
        start_time = time.time()
    if verbose:
        print("organizing connections into reaches ...")

    independent_networks, reaches_bytw, rconn = nnu.organize_independent_networks(
        connections)

    if verbose:
        print("reach organization complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 4: Handle Channel Initial States
    if showtiming:
        start_time = time.time()
    if verbose:
        print("setting channel initial states ...")

    q0 = nnu.build_channel_initial_state(restart_parameters, param_df.index)

    if verbose:
        print("channel initial states complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))
        start_time = time.time()

    # STEP 5: Read (or set) QLateral Inputs
    if showtiming:
        start_time = time.time()
    if verbose:
        print("creating qlateral array ...")

    qlats = nnu.build_qlateral_array(forcing_parameters, connections.keys(), nts)

    if verbose:
        print("qlateral array complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    ################### Main Execution Loop across ordered networks
    if showtiming:
        main_start_time = time.time()
    if verbose:
        print("executing routing computation ...")

    if run_parameters.get("compute_method", None) == "standard cython compute network":
        compute_func = mc_reach.compute_network
    else:
        compute_func = mc_reach.compute_network

    results = compute_nhd_routing_v02(
        connections,
        rconn,
        reaches_bytw,
        compute_func,
        run_parameters.get("parallel_compute_method", None),
        run_parameters.get("subnetwork_target_size", 1),
        # The default here might be the whole network or some percentage...
        run_parameters.get("cpu_pool", None),
        run_parameters.get("nts", 1),
        run_parameters.get("qts_subdivisions", 1),
        independent_networks,
        param_df,
        qlats,
        q0,
        run_parameters.get("assume_short_ts", False),
    )

    csv_output_folder = output_parameters.get("csv_output_folder", None)
    if (debuglevel <= -1) or csv_output_folder:
        qvd_columns = pd.MultiIndex.from_product(
            [range(nts), ["q", "v", "d"]]).to_flat_index()
        flowveldepth = pd.concat(
            [pd.DataFrame(d, index=i, columns=qvd_columns) for i, d in results],
            copy=False,
        )

        if csv_output_folder:
            flowveldepth = flowveldepth.sort_index()
            output_path = pathlib.Path(csv_output_folder).resolve()
            # NOTE: `args` is not defined in this scope; the output filename
            # should be derived from the parsed supernetwork parameters instead.
            flowveldepth.to_csv(output_path.joinpath(f"{args.supernetwork}.csv"))

        if debuglevel <= -1:
            print(flowveldepth)

    if verbose:
        print("ordered reach computation complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    if "parity_check_input_folder" in parity_parameters:
        if verbose:
            print(
                "conducting parity check, comparing WRF Hydro results against t-route results"
            )

        build_tests.parity_check(
            parity_parameters,
            run_parameters["nts"],
            run_parameters["dt"],
            results,
        )
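# A minimal, assumed entry point: if this module is executed as a script, pass
# the command-line arguments through to main_v02(). The actual repository may
# wire its console entry point differently; this is illustrative only.
if __name__ == "__main__":
    import sys

    main_v02(sys.argv[1:])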