def _get_extracted_csv_table(relevant_subnets, tablename, input_path, sep=";"):
    """ Returns extracted csv data of the requested SimBench grid. """
    csv_table = read_csv_data(input_path, sep=sep, tablename=tablename)
    if tablename == "Switch":
        node_table = read_csv_data(input_path, sep=sep, tablename="Node")
        bus_bus_switches = set(get_bus_bus_switch_indices_from_csv(csv_table, node_table))
    else:
        bus_bus_switches = {}
    extracted_csv_table = _extract_csv_table_by_subnet(
        csv_table, tablename, relevant_subnets, bus_bus_switches=bus_bus_switches)
    return extracted_csv_table
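
# Illustration only (assumption, not part of the SimBench code base): a small,
# self-contained sketch of the bus-bus switch notion handled above. It assumes that a
# switch row connects "nodeA" and "nodeB" and counts as bus-bus if neither terminal is an
# auxiliary node; the actual rule lives in get_bus_bus_switch_indices_from_csv() and may
# differ in detail.
def _bus_bus_switch_sketch():
    import pandas as pd
    switch_table = pd.DataFrame({"id": ["S1", "S2"],
                                 "nodeA": ["Bus 1", "Bus 2"],
                                 "nodeB": ["Bus 2", "Bus 2 aux"]})
    node_table = pd.DataFrame({"id": ["Bus 1", "Bus 2", "Bus 2 aux"],
                               "type": ["busbar", "busbar", "auxiliary"]})
    aux_nodes = set(node_table.loc[node_table["type"] == "auxiliary", "id"])
    # a switch is treated as bus-bus switch if both of its terminals are non-auxiliary nodes
    return {i for i, row in switch_table.iterrows()
            if row["nodeA"] not in aux_nodes and row["nodeB"] not in aux_nodes}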
def get_all_simbench_profiles(scenario, input_path=None, sep=";"):
    """ Returns a dict of DataFrames with all SimBench profiles of the given scenario.
    These include all profiles delivered with the net data received by
    get_simbench_net("1-complete_data-mixed-all-%s-sw" % str(scenario)).

    INPUT:
        **scenario** (int) - defines the scenario to which the requested profiles belong.
        Should be within [0, 1, 2].

    OPTIONAL:
        **input_path** (path) - option to change the path to all SimBench grid csv files.
        However, a change should not be necessary.

        **sep** (str, ";") - separator of the csv files which contain the profiles
        information.

    OUTPUT:
        **profiles** (dict) - dict of DataFrames with all SimBench profiles of the given
        scenario
    """
    input_path = input_path if input_path is not None else complete_data_path(scenario)
    csvtablenames = csv_tablenames(['profiles'])

    # read all profiles data
    profiles = read_csv_data(input_path, sep=sep, tablename=csvtablenames)

    # rename csv_tablenames by pandapower element names
    for csv_name, pp_name in zip(csvtablenames, pp_profile_names()):
        profiles[pp_name] = profiles.pop(csv_name)

    return profiles
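
# Example usage (illustrative only; requires the complete SimBench csv data to be
# available under complete_data_path(scenario)):
def _example_get_all_simbench_profiles():
    profiles = get_all_simbench_profiles(0)
    # each key is a pandapower profile name (see pp_profile_names()), each value a
    # DataFrame of time series
    for element, df in profiles.items():
        print(element, df.shape)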
def get_extracted_csv_data(relevant_subnets, input_path, sep=";", **kwargs):
    """ Returns extracted csv data of the requested SimBench grid (by default from the csv
    data of all SimBench grids). **kwargs are ignored.
    """
    # --- import input data
    if 'complete_data' in relevant_subnets[0]:  # return complete data
        return read_csv_data(input_path, sep=sep)
    else:
        csv_data = dict()
        for tablename in csv_tablenames(['elements', 'profiles', 'types', 'cases']):
            csv_data[tablename] = _get_extracted_csv_table(
                relevant_subnets, tablename, input_path=input_path)
        return csv_data
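
# Example usage (illustrative only): any relevant_subnets whose first entry contains
# "complete_data" triggers the shortcut that returns the unfiltered csv data as a dict of
# DataFrames keyed by csv table name. The exact structure of relevant_subnets for the
# extraction branch is produced elsewhere and is not shown here.
def _example_get_extracted_csv_data():
    csv_data = get_extracted_csv_data(["complete_data"],
                                      input_path=complete_data_path(0))
    return csv_data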
import os
import pandas as pd
import simbench as sb
from simbench import sb_dir


def csv_data_to_test_extracting():
    test_network_path = os.path.join(sb_dir, "test", "converter", "test_network")
    csv_data = sb.read_csv_data(test_network_path, sep=";")
    tested_tables = ["Node", "Load", "ExternalNet", "Switch", "Coordinates", "Measurement"]
    csv_data = {tt: csv_data[tt] for tt in tested_tables}
    csv_data["Node"]["subnet"] = ["EHV1_Feeder1"]*2 + ["EHV2"]*2 + ["EHV1_HV1"]*3 + [
        "HV1_Feder%i" % i for i in range(4)] + ["HV1_MV3.101"]*2 + ["MV3.101"] + [
        "EHV1_Feeder1"]*2
    csv_data["Load"]["subnet"] = ["EHV1_Load1", "EHV2_Load6", "EHV1_HV1_eq",
                                  "EHV1_HV1_load", "HV1_MV3.101_eq", "MV3.101_HV1_eq"]
    csv_data["ExternalNet"]["subnet"] = ["EHV1_boundary", "HV1_EHV1_eq", "MV3.101_HV1_eq",
                                         "MV3.101_bound", "EHV1_HV1_eq"]
    csv_data["Switch"].loc[3] = csv_data["Switch"].loc[0]
    csv_data["Switch"].loc[3, ["id", "nodeA", "nodeB"]] = ["BusBus2", "Bus 2", "Bus 4"]
    csv_data["Switch"]["subnet"] = ["EHV1_HV1", "EHV1_HV1", "HV1_MV3.101", "HV1_MV3.101",
                                    "EHV1_HV1"]
    csv_data["Coordinates"]["subnet"] = list(csv_data["Node"]["subnet"][1:12]) + ["MV3.101"]
    csv_data["Measurement"] = pd.concat([csv_data["Measurement"], csv_data["Measurement"],
                                         csv_data["Measurement"]], ignore_index=True)
    csv_data["Measurement"]["name"] = ["Messung %i" % i for i in range(1, 7)]
    csv_data["Measurement"]["subnet"] = ["HV1", "HV1_MV3.101", "EHV1_HV1"]*2
    return csv_data
def test_aux_nodes_without_multiple_connected_branches():
    """ Tests that the complete csv_data contains no auxiliary nodes with more than one
    connected branch element.
    """
    for scenario in range(3):
        csv_data = sb.read_csv_data(sb.complete_data_path(scenario), sep=";")
        annwdb = aux_node_names_with_dupl_branches(csv_data)
        assert not len(annwdb)
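
# A minimal sketch of what a check like aux_node_names_with_dupl_branches() could look
# like (assumptions: auxiliary nodes are marked via the "type" column of the Node table,
# branch tables reference nodes through "nodeA"/"nodeB", and switches are not counted as
# branch elements); the helper used by the test above is the module's own implementation
# and may differ.
def _aux_nodes_with_dupl_branches_sketch(csv_data):
    from collections import Counter
    aux_names = set(csv_data["Node"].loc[csv_data["Node"]["type"] == "auxiliary", "id"])
    counts = Counter()
    # count Line/Transformer terminals located at auxiliary nodes
    for branch_table in ["Line", "Transformer"]:
        for col in ["nodeA", "nodeB"]:
            counts.update(node for node in csv_data[branch_table][col] if node in aux_names)
    return [name for name, n in counts.items() if n > 1]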