# Example #1 (score: 0)
def _get_parameters_to_rename_and_multiply():
    """ Returns a dict which maps each correspondence string ("CsvTable*pp_table") to a list of
    (csv_column, pp_column, factor) tuples for those columns that exist on both sides and whose
    names (or scaling) differ. """
    # --- create dummy_net to get pp columns
    dummy_net = pp.create_empty_network()
    _extend_pandapower_net_columns(dummy_net)
    _prepare_res_bus_table(dummy_net)
    # dcline stores its type under "type" in pandapower; the csv side expects "std_type"
    for elm in ["dcline"]:
        dummy_net[elm].rename(columns={"type": "std_type"}, inplace=True)
    # gen/sgen need result-style voltage columns so they appear in the pp column sets below
    for elm in ["gen", "sgen"]:
        dummy_net[elm]["vm_pu"] = np.nan
        dummy_net[elm]["va_degree"] = np.nan

    # --- get corresponding tables and dataframes
    corr_strings = _csv_table_pp_dataframe_correspondings(str)
    csv_tablenames_, pp_dfnames = _csv_table_pp_dataframe_correspondings(list)

    # --- initialize tuples_dict
    # build a fresh list per key: dict.fromkeys(corr_strings, [..]) would alias ONE shared
    # list across all keys, a latent mutation bug
    tuples_dict = {corr_str: [("id", "name", None)] for corr_str in corr_strings}
    tuples_dict['NodePFResult*res_bus'] = []

    # --- determine tuples_dict
    for corr_str, csv_tablename, pp_dfname in zip(corr_strings,
                                                  csv_tablenames_, pp_dfnames):
        # adapt tuples_dict initialization of Type tables: their id maps to "std_type"
        if "Type" in csv_tablename:
            tuples_dict[corr_str] = [("id", "std_type", None)]
        # get all column correspondings
        corr_col_tuples = _csv_pp_column_correspondings(csv_tablename)
        # get csv and pp columns; std_type tables live nested under dummy_net["std_types"],
        # so their columns come from a transposed one-row DataFrame
        csv_columns = get_columns(csv_tablename)
        pp_columns = dummy_net[pp_dfname].columns if "std_types" not in pp_dfname else \
            pd.DataFrame(dummy_net["std_types"][pp_dfname[10:]]).T.columns
        # keep only tuples whose columns exist in both, csv and pp
        tuples_dict[corr_str] = tuples_dict[corr_str] + [
            corr_col_tuple
            for corr_col_tuple in corr_col_tuples if corr_col_tuple[0] in
            csv_columns and corr_col_tuple[1] in pp_columns
        ]
    return tuples_dict
# Example #2 (score: 0)
def _copy_data(input_data, output_data):
    """ Copies the data from input_data[corr_str] into output_data[output_name]. Correspondence
        strings missing from input_data (or with empty tables) are skipped, and only columns
        present in both tables are copied. """
    corr_strings = _csv_table_pp_dataframe_correspondings(str)
    output_names = _csv_table_pp_dataframe_correspondings(list)[int(
        _is_pp_type(output_data))]

    # the pandas concat/reindex API changed over versions; resolve the flags once
    pd_version = version.parse(pd.__version__)
    concat_accepts_sort = pd_version >= version.parse("0.23.0")  # sort kwarg added in 0.23
    has_reindex_columns = pd_version >= version.parse("0.21.0")  # reindex_axis deprecated then

    for corr_str, output_name in zip(corr_strings, output_names):
        if corr_str not in input_data.keys() or not input_data[corr_str].shape[0]:
            continue
        shared_cols = list(set(output_data[output_name].columns) &
                           set(input_data[corr_str].columns))
        frames = [output_data[output_name], input_data[corr_str][shared_cols]]
        target_columns = output_data[output_name].columns
        if concat_accepts_sort:
            combined = pd.concat(frames, ignore_index=True, sort=False)
        else:
            combined = pd.concat(frames, ignore_index=True)
        if has_reindex_columns:
            output_data[output_name] = combined.reindex(columns=target_columns)
        else:
            # reindex_axis() is the pre-0.21 spelling of reindex(columns=...)
            output_data[output_name] = combined.reindex_axis(target_columns, axis=1)
        # std_type tables are indexed by the type name on the pandapower side
        if "std_types" in corr_str and _is_pp_type(output_data):
            output_data[output_name].index = input_data[corr_str][
                "std_type"]
        _inscribe_fix_values(output_data, output_name)
# Example #3 (score: 0)
def _rename_and_split_input_tables(data):
    """ Splits the tables of ExternalNet, PowerPlant, RES, ext_grid, gen, sgen and name the df
        according to _csv_table_pp_dataframe_correspondings(). """
    # --- initializations
    # which tables need splitting (and by which column) depends on the conversion
    # direction: pandapower -> csv or csv -> pandapower
    split_gen = ["ext_grid", "gen", "sgen"] if _is_pp_type(data) else [
        "ExternalNet", "PowerPlant", "RES"
    ]
    split_gen_col = "phys_type" if _is_pp_type(data) else "calc_type"
    split_Line = [] if _is_pp_type(data) else ["Line"]
    split_ppelm_into_type_and_elm = ["dcline"] if _is_pp_type(data) else []
    input_elm_col = "pp" if _is_pp_type(data) else "csv"
    output_elm_col = "csv" if _is_pp_type(data) else "pp"
    corr_df = _csv_table_pp_dataframe_correspondings(pd.DataFrame)
    # combined key "CsvTable*pp_table" used as the new dict key in data
    corr_df["comb_str"] = corr_df["csv"] + "*" + corr_df["pp"]

    # all elements, which need to be converted to multiple output element tables, (dupl) need to be
    # treated specially, thus must be in split lists
    dupl = corr_df[input_elm_col][corr_df[input_elm_col].duplicated()]
    assert dupl.isin(split_gen + split_Line +
                     split_ppelm_into_type_and_elm).all()

    # -- start renaming and in case of dupl also splitting
    for idx in corr_df.index:
        # get actual element tablenames
        input_elm = corr_df.loc[idx, input_elm_col]
        output_elm = corr_df.loc[idx, output_elm_col]
        corr_str = corr_df.loc[idx, "comb_str"]

        if input_elm in split_gen:
            # split generator-like tables by their phys_type/calc_type value
            data[corr_str] = data[input_elm][data[input_elm][split_gen_col] ==
                                             _get_split_gen_val(output_elm)]
        elif input_elm in split_Line:
            continue  # already done in _csv_types_to_pp1()
        elif input_elm in split_ppelm_into_type_and_elm:
            if "Type" in output_elm:
                # the Type table gets one row per unique std_type
                is_uniq = ~data[input_elm].std_type.duplicated()
                data[corr_str] = data[input_elm].loc[is_uniq]
            else:
                data[corr_str] = data[input_elm]
        else:  # rename data.keys for all elements without special treatment
            if corr_str not in data.keys():
                if input_elm in data.keys():
                    # plain rename: move the table under its combined key
                    data[corr_str] = data.pop(input_elm)
                elif _is_pp_type(data) and "std_types" in input_elm:
                    # std_type tables live nested in data["std_types"];
                    # input_elm[10:] strips the "std_types|" prefix
                    data[corr_str] = pd.DataFrame(
                        data["std_types"][input_elm[10:]])
                else:
                    # no input table available -> provide an empty placeholder
                    data[corr_str] = pd.DataFrame()
# Example #4 (score: 0)
def _replace_name_index(data):
    """ While the simbench csv format assigns connected nodes via names, pandapower assigns via
        indices. This function replaces the assignment of the input data. """
    node_names = {"node", "nodeA", "nodeB", "nodeHV", "nodeMV", "nodeLV"}
    bus_names = {"bus", "from_bus", "to_bus", "hv_bus", "mv_bus", "lv_bus"}
    corr_strings = _csv_table_pp_dataframe_correspondings(str)
    corr_strings.remove(
        "Measurement*measurement")  # already done in convert_measurement()

    if _is_pp_type(data):
        # pandapower -> csv: translate bus indices into node names via the "id" column
        for corr_str in corr_strings:
            present_cols = node_names & set(data[corr_str].columns)
            for col in present_cols:
                bus_indices = data[corr_str][col]
                data[corr_str][col] = data["Node*bus"]["id"].loc[bus_indices].values
    else:
        # csv -> pandapower: translate node names into bus indices
        for corr_str in corr_strings:
            present_cols = bus_names & set(data[corr_str].columns)
            for col in present_cols:
                # each bus name must be unique
                positions = idx_in_2nd_array(data[corr_str][col].values,
                                             data["Node*bus"]["name"].values)
                data[corr_str][col] = data["Node*bus"].index[positions]