# --- Esempio n. 1 (example separator from the original scrape) ---
def main():
    """Build the NHD supernetwork, organize it into reaches, and run a
    fixed-length routing loop over the topologically sorted segments.

    Steps:
      1. Build the supernetwork connection set (``nnu.set_networks``).
      2. Compose networks/reaches (``nru.compose_networks``).
      3. Route 1440 timesteps over every segment in topological order.

    Results accumulate in the module-level ``flowdepthvel`` array
    (one row per connection; columns are flow/depth/vel/qlat for the
    previous and current timestep) and are printed when the run completes.
    """

    global connections
    global networks
    global flowdepthvel

    # Hard-coded run configuration (TODO: expose as command-line args).
    verbose = True
    debuglevel = 0
    showtiming = True

    test_folder = os.path.join(root, "test")
    geo_input_folder = os.path.join(test_folder, "input", "geo")

    # TODO: Make these commandline args
    """##NHD Subset (Brazos/Lower Colorado)"""
    # supernetwork = 'Brazos_LowerColorado_ge5'
    supernetwork = "Pocono_TEST1"
    """##NHD CONUS order 5 and greater"""
    # supernetwork = 'CONUS_ge5'
    """These are large -- be careful"""
    # supernetwork = 'Mainstems_CONUS'
    # supernetwork = 'CONUS_FULL_RES_v20'
    # supernetwork = 'CONUS_Named_Streams' #create a subset of the full resolution by reading the GNIS field
    # supernetwork = 'CONUS_Named_combined' #process the Named streams through the Full-Res paths to join the many hanging reaches

    if verbose:
        print("creating supernetwork connections set")
    if showtiming:
        start_time = time.time()
    # STEP 1: read the geometry and build the connection set.
    supernetwork_data, supernetwork_values = nnu.set_networks(
        supernetwork=supernetwork,
        geo_input_folder=geo_input_folder,
        verbose=False,
        debuglevel=debuglevel,
    )
    if verbose:
        print("supernetwork connections set complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 2: group the connections into networks and reaches.
    if showtiming:
        start_time = time.time()
    if verbose:
        print("organizing connections into reaches ...")
    networks = nru.compose_networks(
        supernetwork_values,
        verbose=False,
        debuglevel=debuglevel,
        showtiming=showtiming,
    )
    if verbose:
        print("reach organization complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    if showtiming:
        start_time = time.time()
    connections = supernetwork_values[0]

    # Split the hydraulic/geometry columns out of the connection dict into
    # a dense array; the sort below orders rows by segment key (column 0).
    connections, conn_data = separate_data(
        connections,
        {
            "key": supernetwork_data["key_col"],
            "length": supernetwork_data["length_col"],
            "bottomwidth": supernetwork_data["bottomwidth_col"],
            "topwidth": supernetwork_data["topwidth_col"],
            "manningn": supernetwork_data["manningn_col"],
            "ChSlp": supernetwork_data["ChSlp_col"],
            "slope": supernetwork_data["slope_col"],
            "topwidthcc": supernetwork_data["topwidthcc_col"],
            "manningncc": supernetwork_data["manningncc_col"],
        },
    )
    conn_data = conn_data[conn_data[:, 0].argsort()]

    # Index Map:
    # flow_prev, depth_prev, vel_prev, qlat_prev
    # flow_curr, depth_curr, vel_curr, qlat_curr
    flowdepthvel = np.zeros((len(connections), 8))
    RN = dict(nhd_network.reverse_network(connections))

    # The topology does not change between timesteps, so compute the
    # topological order once instead of re-sorting on every iteration
    # (the original re-ran kahn_toposort 1440 times).
    ordered_segments = list(nhd_network.kahn_toposort(connections))
    for _ts in range(1440):  # presumably 1440 x 60 s model steps = 1 day -- TODO confirm
        for n in ordered_segments:
            process_edge(n, RN, flowdepthvel, conn_data)

    with np.printoptions(precision=5, suppress=True, linewidth=120):
        print(flowdepthvel)
    # Sanity check: conn_data keys (column 0) should match the sorted
    # connection keys exactly.
    sorted_conns = sorted(connections.keys())
    print(sorted_conns, all(conn_data[:, 0] == sorted_conns))

    if verbose:
        print("ordered reach computation complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))
def main():
    """Demonstrate parallel traversal of reaches built from NHD datasets.

    Builds the supernetwork connection set, composes networks/reaches, and
    then exercises three parallel-ordering strategies over the "large"
    networks: (3a) ordering by total tree depth, (3b) total tree depth with
    headwaters computed first, and (3c) an opportunistic ordering.

    NOTE(review): ``arg1``..``arg4`` and ``printout1`` are not defined in
    this chunk -- presumably module-level placeholders for CLI arguments
    and output; confirm they are assigned before this function runs.
    """

    global connections

    print(
        'This script demonstrates the parallel traversal of reaches developed from NHD datasets'
    )

    # NOTE(review): these presumably come from parsed command-line args.
    verbose = arg1
    debuglevel = arg2
    showtiming = arg3

    test_folder = os.path.join(root, r'test')
    geo_input_folder = os.path.join(test_folder, r'input', r'geo')

    #TODO: Make these commandline args
    supernetwork = arg4
    """##NHD Subset (Brazos/Lower Colorado)"""
    # supernetwork = 'Brazos_LowerColorado_ge5'
    """##NHD CONUS order 5 and greater"""
    # supernetwork = 'CONUS_ge5'
    """These are large -- be careful"""
    # supernetwork = 'Mainstems_CONUS'
    # supernetwork = 'CONUS_FULL_RES_v20'
    # supernetwork = 'CONUS_Named_Streams' #create a subset of the full resolution by reading the GNIS field
    # supernetwork = 'CONUS_Named_combined' #process the Named streams through the Full-Res paths to join the many hanging reaches

    if verbose: print('creating supernetwork connections set')
    if showtiming: start_time = time.time()
    #STEP 1
    supernetwork_data, supernetwork_values = nnu.set_networks(
        supernetwork=supernetwork,
        geo_input_folder=geo_input_folder,
        verbose=False
        # , verbose = verbose
        ,
        debuglevel=debuglevel)
    if verbose: print('supernetwork connections set complete')
    if showtiming: print("... in %s seconds." % (time.time() - start_time))

    #STEP 2
    if showtiming: start_time = time.time()
    if verbose: print('organizing connections into networks and reaches ...')
    networks = nru.compose_networks(
        supernetwork_values,
        verbose=False
        # , verbose = verbose
        ,
        debuglevel=debuglevel,
        showtiming=showtiming)
    if verbose: print('reach organization complete')
    if showtiming: print("... in %s seconds." % (time.time() - start_time))

    #STEP 3
    if verbose: print(f'Now computing the reaches in parallel')
    if verbose: print(f'This is just a DUMMY computation')
    if verbose:
        print(
            f'Only the number of potentially parallelizable reaches is shown for each order'
        )

    connections = supernetwork_values[0]
    #initialize flowdepthvel dict
    parallel_split = -1  # -1 turns off the splitting and runs everything through the lumped execution

    ##STEP 3_ -- Small Networks
    # Another parallelization method is to simply execute a network or group of networks independent from the others.
    ##TODO:Come back to this -- Essentially, we isolated each network and found that parallel speedup was minimal.
    #TODO: add the Parallel Split to show network splitting
    ##Each of the subgroups could use one of the three parallel methods or simply execute
    ##serially for itself (with other subgoups running at the same time).
    ##Probably with some effort, this could be effective.
    if parallel_split >= 0: print(r'DO NOT RUN WITH `parallel_split` >= 0')

    #STEP 3a -- Large Networks by total tree depth
    if showtiming: start_time = time.time()
    if verbose:
        print(
            f'executing computation on reaches ordered by distance from terminal node'
        )

    # With parallel_split == -1 this filter selects every network.
    large_networks = {terminal_segment: network \
                      for terminal_segment, network in networks.items() \
                      if network['maximum_reach_seqorder'] > parallel_split}
    # print(large_networks)
    compute_network_parallel_totaltreedepth(
        large_networks,
        supernetwork_data=supernetwork_data
        # , connections = connections
        # , verbose = False
        ,
        verbose=verbose,
        debuglevel=debuglevel)
    if verbose:
        print(
            f'ordered reach traversal complete for reaches ordered by distance from terminal node'
        )
    if showtiming: print("... in %s seconds." % (time.time() - start_time))

    #STEP 3b -- Large Networks by total tree depth -- segregating the headwaters
    if showtiming: start_time = time.time()
    if verbose:
        print(
            f'executing computation for all headwaters, then by reaches ordered by distance from terminal node'
        )

    large_networks = {terminal_segment: network \
                      for terminal_segment, network in networks.items() \
                      if network['maximum_reach_seqorder'] > parallel_split}
    # print(large_networks)
    compute_network_parallel_totaltreedepth_wHEADS(
        large_networks,
        supernetwork_data=supernetwork_data
        # , connections = connections
        # , verbose = False
        ,
        verbose=verbose,
        debuglevel=debuglevel)
    if verbose:
        print(f'ordered reach computation complete for doing headwaters first')
    if showtiming: print("... in %s seconds." % (time.time() - start_time))

    ##STEP 3c -- Opportunistic Network Search
    # This method does as many as it can immediately in each round
    # (which means that there are not many left in the later rounds
    # of parallelization...)
    if showtiming: start_time = time.time()
    if verbose: print(f'executing computation on reaches opportunistically')
    if verbose:
        print(
            f'(this takes a little longer... we need to improve the method of assigning the opportunistic order...)'
        )

    large_networks = {terminal_segment: network \
                      for terminal_segment, network in networks.items() \
                      if network['maximum_reach_seqorder'] > parallel_split}
    # print(large_networks)
    compute_network_parallel_opportunistic(
        large_networks,
        supernetwork_data=supernetwork_data
        # , connections = connections
        # , verbose = False
        ,
        verbose=verbose,
        debuglevel=debuglevel)
    if verbose: print(f'opportunistic reach computation complete')
    if showtiming: print("... in %s seconds." % (time.time() - start_time))

    print(printout1)  # NOTE(review): printout1 is undefined in this chunk -- verify
# --- Esempio n. 3 (example separator from the original scrape) ---
def main():
    """Route lateral inflows through the NHD supernetwork.

    Parses CLI args, builds the supernetwork and its networks/reaches,
    initializes the per-connection ``flowveldepth`` time-series dict, loads
    lateral inflow (``qlat``) either from a Pocono WRF-Hydro test CSV or as
    a constant, then runs ``compute_network`` per network -- serially or
    via a multiprocessing pool.
    """
    args = _handle_args()

    global connections
    global networks
    global flowveldepth

    supernetwork = args.supernetwork
    break_network_at_waterbodies = args.break_network_at_waterbodies

    dt = float(args.dt)
    nts = int(args.nts)
    qlat_const = float(args.qlat_const)

    # CLI debuglevel is given as a positive number; internal convention is negative.
    debuglevel = -1 * int(args.debuglevel)
    verbose = args.verbose
    showtiming = args.showtiming
    write_csv_output = args.write_csv_output
    write_nc_output = args.write_nc_output
    assume_short_ts = args.assume_short_ts
    parallel_compute = args.parallel_compute

    run_pocono_test = args.run_pocono_test

    if run_pocono_test:
        if verbose:
            print("running test case for Pocono_TEST2 domain")
        # Overwrite the following test defaults
        supernetwork = "Pocono_TEST2"
        break_network_at_waterbodies = False
        dt = 300
        nts = 144
        write_csv_output = True
        write_nc_output = True

    test_folder = os.path.join(root, r"test")
    geo_input_folder = os.path.join(test_folder, r"input", r"geo")

    # TODO: Make these commandline args
    """##NHD Subset (Brazos/Lower Colorado)"""
    # supernetwork = 'Brazos_LowerColorado_Named_Streams'
    # supernetwork = 'Brazos_LowerColorado_ge5'
    # supernetwork = 'Pocono_TEST1'
    # supernetwork = 'Pocono_TEST2'
    """##NHD CONUS order 5 and greater"""
    # supernetwork = 'CONUS_ge5'
    """These are large -- be careful"""
    # supernetwork = 'Mainstems_CONUS'
    # supernetwork = 'CONUS_FULL_RES_v20'
    # supernetwork = 'CONUS_Named_Streams' #create a subset of the full resolution by reading the GNIS field
    # supernetwork = 'CONUS_Named_combined' #process the Named streams through the Full-Res paths to join the many hanging reaches

    if verbose:
        print("creating supernetwork connections set")
    if showtiming:
        start_time = time.time()
    # STEP 1
    supernetwork_data, supernetwork_values = nnu.set_networks(
        supernetwork=supernetwork,
        geo_input_folder=geo_input_folder,
        verbose=False
        # , verbose = verbose
        ,
        debuglevel=debuglevel,
    )
    if verbose:
        print("supernetwork connections set complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 2
    if showtiming:
        start_time = time.time()
    if verbose:
        print("organizing connections into reaches ...")
    networks = nru.compose_networks(
        supernetwork_values,
        break_network_at_waterbodies=break_network_at_waterbodies,
        verbose=False,
        debuglevel=debuglevel,
        showtiming=showtiming,
    )
    if verbose:
        print("reach organization complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))
        start_time = time.time()

    connections = supernetwork_values[0]

    # initialize flowveldepth dict
    # One time-series record per connection; lists are filled during routing.
    flowveldepth = {
        connection: {
            "qlatval": [],
            "time": [],
            "flowval": [],
            "velval": [],
            "depthval": [],
        }
        for connection in connections
    }

    # Lateral flow
    if (run_pocono_test
        ):  # test 1. Take lateral flow from wrf-hydro output from Pocono Basin
        ql_input_folder = os.path.join(
            root,
            r"test/input/geo/PoconoSampleData2/Pocono_ql_testsamp1_nwm_mc.csv")
        # assumes CSV is indexed by segment id with one column per timestep -- TODO confirm
        ql = pd.read_csv(ql_input_folder, index_col=0)

    else:
        # Constant lateral inflow for every segment and timestep.
        ql = pd.DataFrame(qlat_const,
                          index=connections.keys(),
                          columns=range(nts),
                          dtype="float32")

    for index, row in ql.iterrows():
        flowveldepth[index]["qlatval"] = row.tolist()

    if not parallel_compute:
        if verbose:
            print("executing computation on ordered reaches ...")

        for terminal_segment, network in networks.items():
            if verbose:
                print(
                    f"for network terminiating at segment {terminal_segment}")
            compute_network(
                terminal_segment=terminal_segment,
                network=network,
                supernetwork_data=supernetwork_data,
                nts=nts,
                dt=dt,
                verbose=verbose,
                debuglevel=debuglevel,
                write_csv_output=write_csv_output,
                write_nc_output=write_nc_output,
                assume_short_ts=assume_short_ts,
            )
            if showtiming:
                print("... in %s seconds." % (time.time() - start_time))
    else:  # parallel execution (previous comment said "serial" -- this is the pool path)
        if verbose:
            print(f"executing parallel computation on ordered reaches .... ")
        # for terminal_segment, network in networks.items():
        #    print(terminal_segment, network)
        # print(tuple(([x for x in networks.keys()][i], [x for x in networks.values()][i]) for i in range(len(networks))))
        nslist = (
            [
                terminal_segment,
                network,
                supernetwork_data,  # TODO: This should probably be global...
                nts,
                dt,
                verbose,
                debuglevel,
                write_csv_output,
                write_nc_output,
                assume_short_ts,
            ] for terminal_segment, network in networks.items())
        with multiprocessing.Pool() as pool:
            results = pool.starmap(compute_network, nslist)

    if verbose:
        print("ordered reach computation complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))
def main():
    """Build the Pocono test supernetwork, organize its reaches, set up the
    per-segment prev/curr state for flow, depth, velocity, and lateral
    inflow, then compute every network -- by default in parallel via a
    multiprocessing pool.
    """

    global connections
    global networks
    global flowdepthvel

    verbose = True
    debuglevel = 0
    showtiming = True

    test_folder = os.path.join(root, 'test')
    geo_input_folder = os.path.join(test_folder, 'input', 'geo')

    # TODO: Make these commandline args
    """##NHD Subset (Brazos/Lower Colorado)"""
    #supernetwork = 'Brazos_LowerColorado_ge5'
    supernetwork = 'Pocono_TEST1'
    """##NHD CONUS order 5 and greater"""
    # supernetwork = 'CONUS_ge5'
    """These are large -- be careful"""
    # supernetwork = 'Mainstems_CONUS'
    # supernetwork = 'CONUS_FULL_RES_v20'
    # supernetwork = 'CONUS_Named_Streams' #create a subset of the full resolution by reading the GNIS field
    # supernetwork = 'CONUS_Named_combined' #process the Named streams through the Full-Res paths to join the many hanging reaches

    if verbose:
        print('creating supernetwork connections set')
    if showtiming:
        start_time = time.time()
    # STEP 1: build the supernetwork connection set from the geometry inputs.
    supernetwork_data, supernetwork_values = nnu.set_networks(
        supernetwork=supernetwork,
        geo_input_folder=geo_input_folder,
        verbose=False,
        debuglevel=debuglevel,
    )
    if verbose:
        print('supernetwork connections set complete')
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 2: organize connections into networks and reaches.
    if showtiming:
        start_time = time.time()
    if verbose:
        print('organizing connections into reaches ...')
    networks = nru.compose_networks(
        supernetwork_values,
        verbose=False,
        debuglevel=debuglevel,
        showtiming=showtiming,
    )
    if verbose:
        print('reach organization complete')
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    if showtiming:
        start_time = time.time()
    connections = supernetwork_values[0]

    # Per-segment state: previous/current value for each routed quantity.
    quantities = ('flow', 'depth', 'vel', 'qlat')
    flowdepthvel = {
        segment: {quantity: {'prev': 0, 'curr': 0} for quantity in quantities}
        for segment in connections
    }

    parallelcompute = True
    if parallelcompute:
        if verbose:
            print(f'executing parallel computation on ordered reaches .... ')
        # One positional-argument list per network for Pool.starmap.
        nslist = (
            [
                terminal_segment,
                network,
                supernetwork_data,  # TODO: This should probably be global...
                False,
                debuglevel,
            ]
            for terminal_segment, network in networks.items()
        )
        with multiprocessing.Pool() as pool:
            results = pool.starmap(compute_network, nslist)
    else:
        if verbose:
            print('executing computation on ordered reaches ...')
        for terminal_segment, network in networks.items():
            compute_network(
                terminal_segment=terminal_segment,
                network=network,
                supernetwork_data=supernetwork_data,
                verbose=False,
                debuglevel=debuglevel,
            )
            print(f'{terminal_segment}')
            if showtiming:
                print("... in %s seconds." % (time.time() - start_time))

    if verbose:
        print('ordered reach computation complete')
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))
# --- Esempio n. 5 (example separator from the original scrape; the fragment below is the tail of a function whose header was lost) ---
        debuglevel=debuglevel,
    )
    if verbose:
        print("supernetwork connections set complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 2
    if showtiming:
        start_time = time.time()
    if verbose:
        print("organizing connections into reaches ...")
    networks = nru.compose_networks(
        supernetwork_values,
        verbose=False
        # , verbose = verbose
        ,
        debuglevel=debuglevel,
        showtiming=showtiming,
    )
    if verbose:
        print("reach organization complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    import tensorflow as tf

    # Tensorflow is messy -- it needs to be loaded after reading the data
    # or it will mess up the libraries used by xarray to read the netcdf
    model = tf.keras.models.load_model("ML_MC_PRES6")

    connections = supernetwork_values[0]
def main():
    """Route the supernetwork selected on the command line.

    Parses CLI args, builds the supernetwork connection set, composes
    networks/reaches (optionally broken at waterbodies), initializes the
    per-connection prev/curr state dict, and runs ``compute_network`` for
    each network either serially or through a multiprocessing pool.
    """

    args = _handle_args()

    global connections
    global networks
    global flowdepthvel

    # CLI debuglevel is given as a positive number; internal convention is negative.
    debuglevel = -1 * int(args.debuglevel)
    verbose = args.verbose
    showtiming = args.showtiming
    supernetwork = args.supernetwork
    break_network_at_waterbodies = args.break_network_at_waterbodies
    write_output = args.write_output
    assume_short_ts = args.assume_short_ts

    test_folder = os.path.join(root, r"test")
    geo_input_folder = os.path.join(test_folder, r"input", r"geo")

    # TODO: Make these commandline args
    """##NHD Subset (Brazos/Lower Colorado)"""
    # supernetwork = 'Brazos_LowerColorado_Named_Streams'
    # supernetwork = 'Brazos_LowerColorado_ge5'
    # supernetwork = 'Pocono_TEST1'
    """##NHD CONUS order 5 and greater"""
    # supernetwork = 'CONUS_ge5'
    """These are large -- be careful"""
    # supernetwork = 'Mainstems_CONUS'
    # supernetwork = 'CONUS_FULL_RES_v20'
    # supernetwork = 'CONUS_Named_Streams' #create a subset of the full resolution by reading the GNIS field
    # supernetwork = 'CONUS_Named_combined' #process the Named streams through the Full-Res paths to join the many hanging reaches

    if verbose:
        print("creating supernetwork connections set")
    if showtiming:
        start_time = time.time()
    # STEP 1
    supernetwork_data, supernetwork_values = nnu.set_networks(
        supernetwork=supernetwork,
        geo_input_folder=geo_input_folder,
        verbose=False
        # , verbose = verbose
        ,
        debuglevel=debuglevel,
    )
    if verbose:
        print("supernetwork connections set complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 2
    if showtiming:
        start_time = time.time()
    if verbose:
        print("organizing connections into reaches ...")
    networks = nru.compose_networks(
        supernetwork_values,
        break_network_at_waterbodies=break_network_at_waterbodies,
        verbose=False,
        debuglevel=debuglevel,
        showtiming=showtiming,
    )
    if verbose:
        print("reach organization complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    if showtiming:
        start_time = time.time()
    connections = supernetwork_values[0]

    # Per-segment state: previous/current value for each routed quantity.
    flowdepthvel = {
        connection: {
            "flow": {
                "prev": 0,
                "curr": 0
            },
            "depth": {
                "prev": 0,
                "curr": 0
            },
            "vel": {
                "prev": 0,
                "curr": 0
            },
            "qlat": {
                "prev": 0,
                "curr": 0
            },
        }
        for connection in connections
    }

    parallelcompute = False
    if not parallelcompute:
        if verbose:
            print("executing computation on ordered reaches ...")

        for terminal_segment, network in networks.items():
            compute_network(
                terminal_segment=terminal_segment,
                network=network,
                supernetwork_data=supernetwork_data,
                verbose=False,
                debuglevel=debuglevel,
                write_output=write_output,
                assume_short_ts=assume_short_ts,
            )
            print(f"{terminal_segment}")
            if showtiming:
                print("... in %s seconds." % (time.time() - start_time))
    else:
        if verbose:
            print(f"executing parallel computation on ordered reaches .... ")
        # for terminal_segment, network in networks.items():
        #    print(terminal_segment, network)
        # print(tuple(([x for x in networks.keys()][i], [x for x in networks.values()][i]) for i in range(len(networks))))
        # One positional-argument list per network for Pool.starmap.
        nslist = (
            [
                terminal_segment,
                network,
                supernetwork_data,  # TODO: This should probably be global...
                False,
                debuglevel,
                write_output,
                assume_short_ts,
            ] for terminal_segment, network in networks.items())
        with multiprocessing.Pool() as pool:
            results = pool.starmap(compute_network, nslist)

    if verbose:
        print("ordered reach computation complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))
def main():
    """Route the Pocono test supernetwork over a fixed number of timesteps.

    Builds the supernetwork and its networks/reaches, allocates per-segment
    time-series arrays (flow/depth/vel/qlat), then runs either a serial loop
    over networks or a split parallel scheme: networks deeper than
    ``parallel_split`` go through the clustered parallel driver, the rest are
    fanned out to a multiprocessing pool.
    """

    global connections
    global networks
    global flowdepthvel

    verbose = True
    debuglevel = 0
    showtiming = True

    test_folder = os.path.join(root, r"test")
    geo_input_folder = os.path.join(test_folder, r"input", r"geo")

    # TODO: Make these commandline args
    supernetwork = "Pocono_TEST1"
    """##NHD Subset (Brazos/Lower Colorado)"""
    # supernetwork = 'Brazos_LowerColorado_ge5'
    """##NWM CONUS Mainstems"""
    # supernetwork = 'Mainstems_CONUS'
    """These are large -- be careful"""
    # supernetwork = 'CONUS_FULL_RES_v20'
    # supernetwork = 'CONUS_Named_Streams' #create a subset of the full resolution by reading the GNIS field
    # supernetwork = 'CONUS_Named_combined' #process the Named streams through the Full-Res paths to join the many hanging reaches

    if verbose:
        print("creating supernetwork connections set")
    if showtiming:
        start_time = time.time()
    # STEP 1
    supernetwork_data, supernetwork_values = nnu.set_networks(
        supernetwork=supernetwork,
        geo_input_folder=geo_input_folder,
        verbose=False
        # , verbose = verbose
        ,
        debuglevel=debuglevel,
    )
    if verbose:
        print("supernetwork connections set complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 2
    if showtiming:
        start_time = time.time()
    if verbose:
        print("organizing connections into networks and reaches ...")
    networks = nru.compose_networks(
        supernetwork_values,
        verbose=False
        # , verbose = verbose
        ,
        debuglevel=debuglevel,
        showtiming=showtiming,
    )
    if verbose:
        print("reach organization complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 3
    if showtiming:
        start_time = time.time()
    executiontype = "parallel"  # 'serial'

    connections = supernetwork_values[0]

    # number_of_time_steps = 10 #
    number_of_time_steps = 50  #
    # number_of_time_steps = 1440 # number of timestep = 1140 * 60(model timestep) = 86400 = day

    # initialize flowdepthvel dict
    # One array slot per timestep plus one for the initial condition.
    flowdepthvel = {
        connection: {
            "flow": np.zeros(number_of_time_steps + 1),
            "depth": np.zeros(number_of_time_steps + 1),
            "vel": np.zeros(number_of_time_steps + 1),
            "qlat": np.zeros(number_of_time_steps + 1),
        }
        for connection in connections
    }

    if executiontype == "serial":
        if verbose:
            print("executing serial computation on ordered reaches ...")

        for terminal_segment, network in networks.items():
            if showtiming:
                network_start_time = time.time()
            compute_network(
                terminal_segment=terminal_segment,
                network=network,
                supernetwork_data=supernetwork_data,
                nts=number_of_time_steps
                # , connections = connections
                ,
                verbose=False
                # , verbose = verbose
                ,
                debuglevel=debuglevel,
            )

            if verbose:
                print(f"{terminal_segment} completed")
            if showtiming:
                print("... in %s seconds." % (time.time() - network_start_time))

    elif executiontype == "parallel":

        # parallel_split = -1 # -1 turns off the splitting and runs everything through the lumped execution
        parallel_split = 10000  # -1 turns off the splitting and runs everything through the lumped execution

        # STEP 3a -- Large Networks
        # TODO: fix this messaging -- we are less specifically concerned with whether these are large networks and more interested in the general idea of grouping.
        if verbose:
            print(
                f"executing computation on ordered reaches for networks of order greater than {parallel_split} ..."
            )

        # NOTE: with parallel_split = 10000 this cluster is typically empty
        # and everything falls through to the per-network pool below.
        parallel_network_cluster = {
            terminal_segment: network
            for terminal_segment, network in networks.items()
            if network["maximum_reach_seqorder"] > parallel_split
        }
        # print(networks)
        compute_network_parallel_cluster(
            networks=parallel_network_cluster,
            supernetwork_data=supernetwork_data,
            nts=number_of_time_steps
            # , connections = connections
            # , verbose = False
            ,
            verbose=verbose,
            debuglevel=debuglevel,
        )
        if verbose:
            print(
                f"ordered reach computation complete for networks of order greater than {parallel_split}"
            )
        if verbose:
            print(
                f"calculation completed for the following networks (as labelled by their terminal segments)\n{list(parallel_network_cluster.keys())} completed"
            )
        if showtiming:
            print("... in %s seconds." % (time.time() - start_time))
        if showtiming:
            # NOTE(review): num_processes is not defined in this chunk --
            # verify it is a module-level global, else this raises NameError.
            print(f"... with {num_processes} cores")

        ##STEP 3b -- Small Networks
        # if parallel_split >= 0: print(r'DO NOT RUN WITH `parallel_split` >= 0')
        parallel_network_separate = {
            terminal_segment: network
            for terminal_segment, network in networks.items()
            if network["maximum_reach_seqorder"] <= parallel_split
        }

        if verbose:
            print(f"executing parallel computation on ordered reaches .... ")
        # for terminal_segment, network in networks.items():
        #    print(terminal_segment, network)
        # print(tuple(([x for x in networks.keys()][i], [x for x in networks.values()][i]) for i in range(len(networks))))
        # One positional-argument list per network for Pool.starmap.
        nslist = (
            [
                terminal_segment,
                network,
                supernetwork_data,  # TODO: This should probably be global...
                number_of_time_steps,
                False,
                debuglevel,
            ]
            for terminal_segment, network in parallel_network_separate.items()
        )
        with multiprocessing.Pool() as pool:
            results = pool.starmap(compute_network, nslist)
        if verbose:
            print(
                f"calculation completed for the following networks (as labelled by their terminal segments)\n{list(parallel_network_separate.keys())} completed"
            )

    if verbose:
        print("ordered reach computation complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))
# --- Esempio n. 8 (example separator from the original scrape; the function below is truncated mid-statement at the end of this chunk) ---
def main():
    """Driver demonstrating parallel traversal of NHD-derived reach networks.

    Reads run options from the command line, builds the supernetwork
    connections table (STEP 1), organizes it into networks and reaches
    (STEP 2), then exercises three alternative parallel orderings of the
    reach computation (STEP 3a-3c).  The computation itself is a dummy;
    only the traversal/ordering strategies are being compared.
    """

    global connections

    args = cmd._handle_args()
    print(
        "This script demonstrates the parallel traversal of reaches developed from NHD datasets"
    )

    # Unpack run options; debuglevel is stored negated by convention.
    verbose = args.verbose
    debuglevel = -1 * int(args.debuglevel)
    showtiming = args.showtiming
    break_network_at_waterbodies = args.break_network_at_waterbodies
    supernetwork = args.supernetwork

    # Repo root is three directory levels above this file.
    repo_root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    )
    test_folder = os.path.join(repo_root, r"test")

    # A "custom" supernetwork points at a user-supplied geometry file;
    # otherwise use the bundled test geometry.
    if args.supernetwork == "custom":
        geo_input_folder = args.customnetworkfile
    else:
        geo_input_folder = os.path.join(test_folder, r"input", r"geo")

    if verbose:
        print("creating supernetwork connections set")
    if showtiming:
        start_time = time.time()

    # STEP 1 -- read geometry and build the connections table.
    supernetwork_data, supernetwork_values = nnu.set_networks(
        supernetwork=supernetwork,
        geo_input_folder=geo_input_folder,
        verbose=verbose,
        debuglevel=debuglevel,
    )
    if verbose:
        print("supernetwork connections set complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 2 -- group connections into independent networks and reaches.
    if showtiming:
        start_time = time.time()
    if verbose:
        print("organizing connections into networks and reaches ...")
    networks = nru.compose_networks(
        supernetwork_values,
        break_network_at_waterbodies=break_network_at_waterbodies,
        debuglevel=debuglevel,
        verbose=verbose,
        showtiming=showtiming,
    )
    if verbose:
        print("reach organization complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 3 -- exercise the parallel traversal strategies.
    if verbose:
        print(f"Now computing the reaches in parallel")
        print(f"This is just a DUMMY computation")
        print(
            f"Only the number of potentially parallelizable reaches is shown for each order"
        )

    connections = supernetwork_values[0]

    # -1 disables splitting: every network goes through the lumped execution.
    parallel_split = -1

    ##STEP 3_ -- Small Networks
    # Another parallelization method is to simply execute a network or group
    # of networks independent from the others.
    ##TODO:Come back to this -- Essentially, we isolated each network and
    # found that parallel speedup was minimal.
    # TODO: add the Parallel Split to show network splitting
    ##Each of the subgroups could use one of the three parallel methods or
    ##simply execute serially for itself (with other subgoups running at the
    ##same time). Probably with some effort, this could be effective.
    if parallel_split >= 0:
        print(r"DO NOT RUN WITH `parallel_split` >= 0")

    def _networks_above_split(all_networks, split):
        # Networks whose deepest reach sequence order exceeds the split
        # threshold (with split == -1 this selects every network).
        return {
            term_seg: net
            for term_seg, net in all_networks.items()
            if net["maximum_reach_seqorder"] > split
        }

    # STEP 3a -- Large Networks by total tree depth
    if showtiming:
        start_time = time.time()
    if verbose:
        print(
            f"executing computation on reaches ordered by distance from terminal node"
        )

    large_networks = _networks_above_split(networks, parallel_split)
    compute_network_parallel_totaltreedepth(
        large_networks,
        supernetwork_data=supernetwork_data,
        verbose=verbose,
        debuglevel=debuglevel,
    )
    if verbose:
        print(
            f"ordered reach traversal complete for reaches ordered by distance from terminal node"
        )
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 3b -- Large Networks by total tree depth, headwaters segregated
    if showtiming:
        start_time = time.time()
    if verbose:
        print(
            f"executing computation for all headwaters, then by reaches ordered by distance from terminal node"
        )

    large_networks = _networks_above_split(networks, parallel_split)
    compute_network_parallel_totaltreedepth_wHEADS(
        large_networks,
        supernetwork_data=supernetwork_data,
        verbose=verbose,
        debuglevel=debuglevel,
    )
    if verbose:
        print(f"ordered reach computation complete for doing headwaters first")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    ##STEP 3c -- Opportunistic Network Search
    # Runs as many reaches as possible immediately in each round, which
    # leaves few reaches for the later rounds of parallelization.
    if showtiming:
        start_time = time.time()
    if verbose:
        print(f"executing computation on reaches opportunistically")
        print(
            f"(this takes a little longer... we need to improve the method of assigning the opportunistic order...)"
        )

    large_networks = _networks_above_split(networks, parallel_split)
    compute_network_parallel_opportunistic(
        large_networks,
        supernetwork_data=supernetwork_data,
        verbose=verbose,
        debuglevel=debuglevel,
    )
    if verbose:
        print(f"opportunistic reach computation complete")
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))
Esempio n. 9
0
def main():
    """Serial driver: build the supernetwork, organize it into networks and
    reaches, then run the (dummy) reach computation one network at a time.

    Results and intermediate structures are published through the module
    globals ``connections``, ``networks``, and ``flowdepthvel``.
    """

    global connections
    global networks
    global flowdepthvel

    # Hard-wired run configuration.
    # TODO: Make these commandline args
    verbose = True
    debuglevel = 0
    showtiming = True

    # `root` is a module-level global; geometry lives under test/input/geo.
    geo_input_folder = os.path.join(os.path.join(root, r'test'), r'input', r'geo')

    # Selected dataset -- alternatives below, ordered by size.
    supernetwork = 'Pocono_TEST1'
    # NHD Subset (Brazos/Lower Colorado):
    # supernetwork = 'Brazos_LowerColorado_ge5'
    # NWM CONUS Mainstems:
    # supernetwork = 'Mainstems_CONUS'
    # These are large -- be careful:
    # supernetwork = 'CONUS_FULL_RES_v20'
    # supernetwork = 'CONUS_Named_Streams' #create a subset of the full resolution by reading the GNIS field
    # supernetwork = 'CONUS_Named_combined' #process the Named streams through the Full-Res paths to join the many hanging reaches

    if verbose:
        print('creating supernetwork connections set')
    if showtiming:
        start_time = time.time()

    # STEP 1 -- read geometry and build the connections table.
    supernetwork_data, supernetwork_values = nnu.set_networks(
        supernetwork=supernetwork,
        geo_input_folder=geo_input_folder,
        verbose=False,  # kept quiet regardless of the driver's verbosity
        debuglevel=debuglevel,
    )
    if verbose:
        print('supernetwork connections set complete')
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 2 -- group connections into independent networks and reaches.
    if showtiming:
        start_time = time.time()
    if verbose:
        print('organizing connections into networks and reaches ...')
    networks = nru.compose_networks(
        supernetwork_values,
        verbose=False,
        debuglevel=debuglevel,
        showtiming=showtiming,
    )
    if verbose:
        print('reach organization complete')
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))

    # STEP 3 -- run the computation network by network.
    if showtiming:
        start_time = time.time()
    executiontype = 'serial'  # 'parallel'

    if verbose:
        print('executing serial computation on ordered reaches ...')
    connections = supernetwork_values[0]

    nts = 10  # number of time steps
    # nts = 50
    # nts = 1440  # 1440 steps * 60 s model timestep = 86400 s = one day

    # One state record per connection; index 0..nts inclusive per quantity.
    flowdepthvel = {
        seg: {
            'flow': np.zeros(nts + 1),
            'depth': np.zeros(nts + 1),
            'vel': np.zeros(nts + 1),
            'qlat': np.zeros(nts + 1),
        }
        for seg in connections
    }

    if executiontype == 'serial':
        for terminal_segment, network in networks.items():
            if showtiming:
                network_start_time = time.time()
            compute_network(
                terminal_segment=terminal_segment,
                network=network,
                supernetwork_data=supernetwork_data,
                nts=nts,
                verbose=False,
                debuglevel=debuglevel,
            )
            if verbose:
                print(f'{terminal_segment} completed')
            if showtiming:
                print("... in %s seconds." % (time.time() - network_start_time))

    if verbose:
        print('ordered reach computation complete')
    if showtiming:
        print("... in %s seconds." % (time.time() - start_time))