Code example #1
def load_lakes(lakes_table, convert_dict):
    """
    Load lakes and add field for sediment trapping calculations

    :param lakes_table:
    :param convert_dict:
    :return: numpy array of lakes
    """

    print("Loading lakes")

    flds = [
        "GOID", "GOOD", "Lake_type", "SED_ACC", "IN_STREAM", "IN_CATCH",
        "Vol_total", "Dis_avg", "Res_time"
    ]

    arr = arcpy.da.TableToNumPyArray(lakes_table, flds, null_value=0)

    arr = helper.add_fields(arr, [("TE_brune", 'f8')])
    arr = helper.add_fields(arr, [("LOSS_LKES_OUT_NET", 'f8')])

    arr["TE_brune"] = 0
    arr["LOSS_LKES_OUT_NET"] = 0

    # Remap lake GOIDs from old to new IDs (unmatched GOIDs become 0)
    for a in arr:
        a["GOID"] = convert_dict.get(a["GOID"], 0)

    return arr
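The TE_brune field added above is presumably filled elsewhere with a Brune-style trap efficiency. As a minimal illustrative sketch only (not this project's implementation), the widely used Vörösmarty et al. (2003) approximation of the Brune (1953) curve can be computed from the residence time, assuming Res_time is given in years:

import numpy as np

def trap_efficiency_brune(res_time_years):
    """Approximate Brune trap efficiency from residence time in years.

    Illustrative sketch using the Vorosmarty et al. (2003) approximation
    of the Brune (1953) curve, TE = 1 - 0.05 / sqrt(residence time).
    Expects a numpy array; results are clipped to the range [0, 1].
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        te = 1.0 - 0.05 / np.sqrt(res_time_years)
    return np.clip(te, 0.0, 1.0)

# e.g. lakes["TE_brune"] = trap_efficiency_brune(lakes["Res_time"])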
Code example #2
def load_streams(stream_table, dof_field):
    """
    Loads the streams and adds a field for holding the DOF values

    :param stream_table: numpy array representing the river reaches
    :param dof_field: field name to store DOF results
    :return:
    """
    flds = [
        fd.BAS_ID, fd.GOID, fd.NOID, fd.NDOID, fd.NUOID, fd.RIV_ORD,
        fd.DIS_AV_CMS, fd.HYFALL
    ]

    tool.check_fields(stream_table, flds)

    arr = arcpy.da.TableToNumPyArray(stream_table, flds, null_value=0)
    arr = tool.add_fields(arr, [(str(dof_field), 'f4')])
    arr[dof_field] = 0
    return arr
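A minimal usage sketch; the geodatabase path and the DOF field name below are assumptions for illustration only:

# Hypothetical call: adds an 'f4' field named "DOF", initialised to 0
streams = load_streams(r"C:\data\ffr.gdb\streams_v1", "DOF")
print(streams["DOF"].sum())  # 0.0 until the DOF calculation fills it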
Code example #3
def load_streams(stream_table):
    """
    Loading stream network and adding fields

    :param stream_table:
    :return: stream array with necessary fields
    """

    # Existing fields to load
    flds = [
        fd.GOID, fd.NOID, fd.NDOID, fd.NUOID, fd.INC, fd.DIS_AV_CMS, fd.BAS_ID,
        fd.UPLAND_SKM, fd.ERO_YLD_TON
    ]

    arr = arcpy.da.TableToNumPyArray(stream_table, flds)

    # Add result fields for the natural (NAT), anthropogenic (ANT) and
    # total sediment calculations, and initialise them to zero
    new_flds = [
        fd.SED_LSS_LKS_OT_NAT, fd.SED_LSS_LKS_IN_NAT, fd.SED_NAT_UP,
        fd.SED_NAT, fd.SED_LSS_LKS_OT_ANT, fd.SED_LSS_LKS_IN_ANT,
        fd.SED_LSS_DMS_ANT, fd.SED_ANT_UP, fd.SED_ANT, fd.SED_LSS_TOT,
        fd.SED
    ]

    arr = helper.add_fields(arr, [(f, 'f8') for f in new_flds])

    for f in new_flds:
        arr[f] = 0

    return arr
Code example #4
def update_stream_routing_index(streams):
    """
    Function to sort the stream network using the upstream area. This allows
    the network to be processed in order from headwaters to the ocean. Afterwards
    the Network IDS are recalculated

    :param streams: numpy array of stream network
    :return:
    """

    print("Updating stream index")

    # Maintain the old GOID values in a new field
    streams = helper.add_fields(streams, [("OGOID", 'i4')])
    streams["OGOID"] = streams["GOID"]

    # Sort the array by basin and upland area.
    # This is key to being able to process the river network from top
    # to bottom
    streams.sort(order=['BAS_ID', 'UPLAND_SKM'])

    # Create routing dictionaries and fill them. The defaults yield 0
    # (no downstream reach) and '' (no upstream reaches) for missing keys
    oid_dict = {}
    convert_dict = {}
    ups_dict = defaultdict(str)
    down_dict = defaultdict(int)

    for i, myrow in enumerate(streams, start=1):
        oid_dict[int(myrow["GOID"])] = i

    for i, myrow in enumerate(streams, start=1):
        dn_old_oid = myrow["NDOID"]
        new_oid = oid_dict.get(int(dn_old_oid), -1)

        if new_oid != -1:
            # Write OID of the next downstream reach
            down_dict[i] = new_oid

            # Write OID of the next upstream reach; multiple upstream
            # reaches are stored as a '_'-separated string
            exi_value = ups_dict.get(int(new_oid), -99)

            if exi_value == -99:
                ups_dict[int(new_oid)] = i
            else:
                ups_dict[int(new_oid)] = str(exi_value) + '_' + str(i)

    # Write the new index values back to the numpy array
    for i, myrow in enumerate(streams, start=1):
        myrow["NOID"] = i
        myrow["NDOID"] = down_dict[i]
        myrow["NUOID"] = ups_dict[i]

    # Create a dictionary to convert old IDs (key) to new IDs (value)
    for myrow in streams:
        convert_dict[int(myrow["OGOID"])] = myrow["NOID"]

    return streams, convert_dict
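Note how the routine encodes multiple upstream neighbours in NUOID as a '_'-joined string (reaches 3 and 7 become "3_7"), while NDOID holds a single ID. A downstream consumer could decode the string along these lines; parse_nuoid is a hypothetical helper, not part of the original module:

def parse_nuoid(nuoid):
    """Decode a '_'-joined NUOID value into a list of integer reach IDs."""
    s = str(nuoid)
    if s == "":
        return []  # headwater reach: no upstream neighbours
    return [int(part) for part in s.split("_")]

assert parse_nuoid("") == []
assert parse_nuoid("42") == [42]
assert parse_nuoid("3_7") == [3, 7]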
Code example #5
def run_csi(stamp, para, scenarios, st_flds, paths):
    """
    This is the main function to calculate the Connectivity Status Index (CSI); to calculate the river
    status, and to post-process the results into tables.

    The module is divided into three parts:

    1) Calculation of the CSI for each scenario.
    The fields that hold the CSI as well as the Dominance are added
    for each scenario and are named after the scenario name given
    in the config file

    2) The calculation of river status, in field ``CAT_FFR`` which
    determines the river or river stretch as either *'Free-flowing' (1)*,
    with *'Good connectivity status' (2)*, or *'impacted' (3)

    3) The calculation of benchmark, global and sensitivity statistics.

    :param stamp: Timestamp
    :param para: Parameters
    :param scenarios: Scenario set
    :param st_flds: list of fields
    :param paths: path settings
    :return:

    """

    # Looping through the individual scenarios
    for scenario in scenarios:

        # Unpack the scenario settings
        (sce_name, list_of_fields, list_of_weights, csi_threshold,
         flood_weight_damp, filter_thres, to_process, to_export) = scenario

        if to_process == 0:
            # prt("Skipped: " + sce_name)
            continue

        prt("Processing: " + sce_name)

        # Define output CSI table
        csi_tb = paths["gdb_full_path"] + "\\" + "csi_tb"

        # Define output CSI fc
        csi_fc_name = "csi_fc_" + str(sce_name)

        stream_array = tools.load_stream_array(
            stream_feature_class=para["streams_fc"],
            stream_fields=st_flds)

        # Adding results fields to output table

        # Get the names of new csi fields to append
        sce_name, dom_field_name, ff_field_name, \
        csi_field_names = tools.get_csi_field_names(name=sce_name)

        # Get the names of new ffr fields to append
        ffr_stat1_field, ffr_stat2_field, ffr_dis_field, \
        ffr_field_names = tools.get_ffr_field_names(name=sce_name)

        prt("Adding results fields to stream array")
        stream_csi = tools.add_fields(array=stream_array,
                                      desc=csi_field_names +
                                           ffr_field_names)

        prt("")
        prt("***********************")
        prt("PART 1: Calculating CSI")
        prt("***********************")
        prt("")

        prt(str(scenario))

        stream_csi = csi.calculate_csi(
            streams_array=stream_csi,
            csi_field_name=sce_name,
            dom_field_name=dom_field_name,
            ff_field_name=ff_field_name,
            fields=list_of_fields,
            weights=list_of_weights,
            flood_weight=flood_weight_damp,
            csi_threshold=csi_threshold,
            test_pickle_folder=paths["test_pickle_folder"])

        # Saving the CSI slice to a pickle for later sensitivity analysis.
        # Each scenario result gets its own pickle; the sensitivity
        # analysis loads the pickles and processes them together
        tools.save_as_cpickle(pickle_object=stream_csi[sce_name],
                              folder=paths["sta_csi_folder"], name=sce_name,
                              file_extension=".csi")

        # Assemble a results list that holds the attributes of the scenario
        # run. Results from the global analysis will later be added. After
        # each scenario is run, the list gets added to a list of lists,
        # which becomes the sheet "Global_stats" in the results Excel file
        result_list = [stamp, sce_name]
        result_list += list_of_fields
        result_list += list_of_weights
        result_list += [csi_threshold, flood_weight_damp, to_process,
                        filter_thres]

        prt("")
        prt("*************************************")
        prt("PART 2: Calculating global statistics")
        prt("*************************************")
        prt("")

        global_stats = sts.post_stats_global_single(
            stream_csi, sce_name, csi_threshold)

        # The results of the global statistics analysis are appended to
        # the results list
        result_list.extend(global_stats)

        prt("")
        prt("***********************************************")
        prt("PART 3: Calculating global dominance statistics")
        prt("***********************************************")
        prt("")
        dom_stats = dm.post_stats_dom_single(
            stream_csi, sce_name, csi_threshold)

        dom_stats["Stamp"] = stamp
        dom_stats_sort = dom_stats[["Stamp", "SCE_NAME", "Pressure", "NUM"]]

        tools.export_excel(dom_stats_sort, "Global_dom", paths["writer"],
                           False)

        prt("")
        prt("**********************************")
        prt("PART 4: Dissolving Backbone Rivers")
        prt("**********************************")
        prt("")

        # Filtering, dissolving and aggregating.
        # This part creates a copy of the stream array, applies filtering
        # and dissolving operations, and then overwrites the FFR status
        # fields as well as the river stretch IDs. The CSI values remain
        # as they are.

        # Make a copy of the original results.
        stream_alt = np.copy(stream_csi)

        # Dissolve
        prt("Dissolving part 1 of %s: " % ff_field_name)
        stream_alt = sta.dissolve_rivers(stream_alt, ff_field_name,
                                         ffr_dis_field)

        # The spatial dissolving identifies river stretches that were both
        # small and had a disproportionately high impact on the CSI. This
        # function conducts a spatial selection of the river reaches that
        # caused the sections with the high impact
        prt("Apply filter for %s: " % ff_field_name)
        bb_ids_to_filter = sta.apply_volume_filter(
            csi_fc=stream_alt,
            ff_field=ff_field_name,
            dis_id_field=ffr_dis_field,
            pct_aff_thres=filter_thres)

        # Spatial selection and overwrite with zeros
        stream_alt = sta.update_csi(stream_alt, bb_ids_to_filter,
                                    ffr_dis_field, sce_name, ff_field_name)

        # Dissolve again
        prt("Dissolving part 2 of %s: " % ff_field_name)
        stream_alt = sta.dissolve_rivers(stream_alt, ff_field_name,
                                         ffr_dis_field)

        prt("Updating array %s: " % ff_field_name)
        stream_csi = sta.update_streams_with_diss_id(
            stream_csi, stream_alt, ffr_dis_field)

        # Status calculations
        prt("Calculating Status of %s: " % ff_field_name)
        stream_csi = sta.calculate_sta(stream_csi, stream_alt, ff_field_name, ffr_stat1_field,
                                       ffr_stat2_field, ffr_dis_field)

        prt("")
        prt("*******************************************")
        prt("PART 5: Calculating benchmarking statistics")
        prt("*******************************************")
        prt("")

        bench_val, bench_dom = bm.post_stats_bench_single(
            stream_array_mod=stream_alt,
            scenario_name=sce_name,
            bench_fc=para["bench_fc"],
            csi_threshold=csi_threshold)

        # Adding the value for the number of free-flowing rivers at the
        # end of the global results list
        result_list += [bench_val]

        sts.export_global_stats_results_to_excel(name_sheet="Global_stats",
                                                 result_list=result_list,
                                                 writer=paths["writer"])

        bench.export_benchmarking_dom_results(bench_dom=bench_dom,
                                              stamp=stamp,
                                              writer=paths["writer"])

        prt("")
        prt("***************************************")
        prt("PART 6: Calculating backbone statistics")
        prt("***************************************")
        prt("")

        bb.backbone_stats(stream_csi, sce_name, para["min_length"],
                          paths["sta_pickle_folder"], paths["writer"])

        prt("")
        prt("***************************************")
        prt("PART 7: Exporting results              ")
        prt("***************************************")
        prt("")

        if to_export == 1:
            csi_table = str(csi_tb) + str(sce_name)

            # Reduce the numpy array to only the necessary fields, i.e.
            # the names of the new CSI fields to append to the input
            # streams feature class
            distilled_fields = [fd.GOID,
                                sce_name,
                                dom_field_name,
                                ff_field_name,
                                ffr_stat1_field,
                                ffr_stat2_field,
                                ffr_dis_field]

            distilled = stream_csi[distilled_fields]

            prt("Exporting table: " + str(csi_table))
            arcpy.da.NumPyArrayToTable(distilled, csi_table)

            prt("Joining and exporting feature class")

            output_fc = tools.export_joined(
                output_geodatabase_path=paths["gdb_full_path"],
                output_table_name=csi_fc_name,
                table_to_join=csi_table,
                join_table=para["streams_fc"])

            prt("Renaming fields")
            tools.remove_csi_traces(output_fc, sce_name)
            prt("Deleting join fields")
            tools.delete_field(output_fc, ["OBJECTID_1", "GOID_1"])

        stream_array = None
        stream_csi = None

    prt("")
    prt("***************************************")
    prt("PART 8: Post processing sensitivity    ")
    prt("***************************************")
    prt("")

    sns.pst_csi_calculations(paths["sta_csi_folder"])

    prt("")
    prt("***************************************")
    prt("PART 9: Open results Excel and end     ")
    prt("***************************************")
    prt("")

    os.system("start " + paths["excel_file"])
    print(datetime.datetime.now())
    prt("Done")