コード例 #1
0
ファイル: test_pv_reports.py プロジェクト: yuanzy97/PyDSS
def test_pv_reports(cleanup_project):
    """Run the PV reports project and validate each generated report."""
    PyDssProject.run_project(
        PV_REPORTS_PROJECT_PATH,
        simulation_file=SIMULATION_SETTINGS_FILENAME,
    )
    results = PyDssResults(PV_REPORTS_PROJECT_PATH)

    # This test data doesn't have changes for Capacitors or RegControls.
    cap_counts = results.read_report("Capacitor State Change Counts")
    assert len(cap_counts["scenarios"]) == 2
    assert not cap_counts["scenarios"][1]["capacitors"]

    reg_counts = results.read_report("RegControl Tap Number Change Counts")
    assert len(reg_counts["scenarios"]) == 2
    assert not reg_counts["scenarios"][1]["reg_controls"]

    clipping = results.read_report("PV Clipping")
    assert len(clipping["pv_systems"]) == 5
    assert all("pv_clipping" in system for system in clipping["pv_systems"])

    curtailment = results.read_report("PV Curtailment")
    assert isinstance(curtailment, pd.DataFrame)
コード例 #2
0
ファイル: test_pydss_project.py プロジェクト: NREL/PyDSS
def test_create_project(pydss_project):
    """Create a project, reload it, and verify scenarios round-trip."""
    project_name = "test-project"
    project_dir = os.path.join(PATH, project_name)
    # Intentionally not in alphabetic order so that we verify our designated
    # ordering.
    scenarios = [
        PyDssScenario("b_scenario1"),
        PyDssScenario("a_scenario2"),
    ]
    project = PyDssProject.create_project(PATH, project_name, scenarios)
    assert os.path.exists(project_dir)
    for scenario in scenarios:
        for dir_name in PyDssScenario._SCENARIO_DIRECTORIES:
            assert os.path.exists(
                os.path.join(project_dir, SCENARIOS, scenario.name, dir_name)
            )

    reloaded = PyDssProject.load_project(project_dir)
    assert project.name == reloaded.name
    scenarios1 = project.scenarios
    scenarios1.sort(key=lambda s: s.name)
    scenarios2 = reloaded.scenarios
    scenarios2.sort(key=lambda s: s.name)

    assert len(scenarios1) == len(scenarios2)
    for original, restored in zip(scenarios1, scenarios2):
        assert original.name == restored.name
        assert original.controllers == restored.controllers
コード例 #3
0
def run_example(example_name, scenarios):
    """Run every scenario of an example project.

    Parameters
    ----------
    example_name : str
        Name of the example directory to copy into a temp folder and run.
    scenarios : list
        Dicts with keys "TOML" (simulation settings file) and "file"
        (optional support script launched before the simulation).

    """
    proc = None
    assert isinstance(example_name, str)
    assert isinstance(scenarios, list)
    base_projects_path = copy_examples_to_temp_folder(example_name)
    for scenario in scenarios:
        assert isinstance(scenario, dict)
        sim_file = scenario["TOML"]
        sup_file = scenario["file"]

        logger.info('Running scenario %s for example %s', example_name, sim_file)
        if sup_file is not None:
            sup_file_path = os.path.join(base_projects_path, sup_file)
            assert os.path.exists(sup_file_path)
            dir_path = os.path.dirname(sup_file_path)
            dir_main = os.getcwd()
            try:
                os.chdir(dir_path)
                # Bug fix: with shell=True and a list argument, POSIX passes
                # only the first element to the shell and the script path is
                # never executed. Run the interpreter directly instead.
                proc = subprocess.Popen([sys.executable, sup_file_path])
            finally:
                os.chdir(dir_main)
        try:
            if sim_file:
                project_path = os.path.join(base_projects_path, example_name)
                assert os.path.exists(base_projects_path)
                PyDssProject.run_project(project_path, options=None, tar_project=False, zip_project=False,
                                         simulation_file=sim_file)
        finally:
            # Always stop the support process, even if the run failed.
            if proc is not None:
                proc.terminate()
コード例 #4
0
def test_auto_snapshot_time_point(cleanup_project):
    """Verify the auto-selected snapshot start time for max_pv_load_ratio."""
    PyDssProject.run_project(
        AUTO_SNAPSHOT_TIME_POINT_PROJECT_PATH,
        simulation_file=SIMULATION_SETTINGS_FILENAME,
    )
    loaded = PyDssProject.load_project(AUTO_SNAPSHOT_TIME_POINT_PROJECT_PATH)
    time_settings = loaded.read_scenario_time_settings("max_pv_load_ratio")
    assert f"{time_settings['start_time']}" == "2020-01-01 11:15:00"
コード例 #5
0
def run_project_with_custom_exports(path, scenario, sim_file, data):
    """Runs a project while overriding an export config file."""
    exports_file = f"{path}/Scenarios/{scenario}/ExportLists/Exports.toml"
    saved_copy = exports_file + ".bk"
    shutil.copyfile(exports_file, saved_copy)
    dump_data(data, exports_file)

    try:
        PyDssProject.run_project(path, simulation_file=sim_file)
    finally:
        # Put the original export list back no matter how the run ended.
        os.remove(exports_file)
        os.rename(saved_copy, exports_file)
コード例 #6
0
def add_post_process(project_path, scenario_name, script, config_file):
    """Add post-process script to PyDSS scenario."""
    setup_logging("PyDSS", console_level=logging.INFO)
    loaded = PyDssProject.load_project(project_path)
    target_scenario = loaded.get_scenario(scenario_name)
    target_scenario.add_post_process(
        {"script": script, "config_file": config_file}
    )
    # Persist the updated scenario configuration to disk.
    loaded.serialize()
コード例 #7
0
def test_space_estimation_with_dry_run(cleanup_project):
    """Should generate esimated space with dry run simulation"""
    # Dry-run the project without producing real outputs.
    project = PyDssProject.load_project(RUN_PROJECT_PATH)
    project.run(dry_run=True)

    estimate = project.estimated_space.get("scenario1")
    assert estimate is not None
    assert estimate > 0
コード例 #8
0
ファイル: test_pydss_examples.py プロジェクト: yuanzy97/PyDSS
def run_example(example_name, scenarios):
    """Run every scenario of an example project.

    Parameters
    ----------
    example_name : str
        Name of the example directory to copy into a temp folder and run.
    scenarios : list
        Dicts with keys "TOML" (simulation settings file) and "file"
        (optional support script launched before the simulation).

    """
    proc = None
    assert isinstance(example_name, str)
    assert isinstance(scenarios, list)
    base_projects_path = copy_examples_to_temp_folder(example_name)
    for scenario in scenarios:
        assert isinstance(scenario, dict)
        sim_file = scenario["TOML"]
        sup_file = scenario["file"]

        print(f'Running scenario {example_name} for example {sim_file}')
        if sup_file is not None:
            sup_file_path = os.path.join(base_projects_path, sup_file)
            assert os.path.exists(sup_file_path)
            dir_path = os.path.dirname(sup_file_path)
            dir_main = os.getcwd()
            try:
                os.chdir(dir_path)
                print(dir_path)
                print(f"Running {sup_file_path} in a subprocess")
                print(sys.executable)
                # Bug fix: with shell=True and a list argument, POSIX passes
                # only the first element to the shell and the script path is
                # never executed. Run the interpreter directly instead.
                proc = subprocess.Popen([sys.executable, sup_file_path])
            finally:
                os.chdir(dir_main)
        try:
            if sim_file:
                project_path = os.path.join(base_projects_path, example_name)
                assert os.path.exists(base_projects_path)
                PyDssProject.run_project(project_path,
                                         options=None,
                                         tar_project=False,
                                         zip_project=False,
                                         simulation_file=sim_file)
        finally:
            print("Run complete")

            # Always stop the support process, even if the run failed.
            if proc is not None:
                proc.terminate()


#test_external_interfaces_example()
コード例 #9
0
def test_export_moving_averages(cleanup_project):
    """Compare stored moving averages with a rolling mean over the raw data."""
    path = CUSTOM_EXPORTS_PROJECT_PATH
    sim_file = SIMULATION_SETTINGS_FILENAME
    circuit = "Circuit.heco19021"
    window_size = 10
    PyDssProject.run_project(path, simulation_file=sim_file)

    # Raw values recorded at every time point.
    raw = _get_dataframe(path, "Circuits", "LineLosses", circuit,
                         real_only=True)
    assert len(raw) == 96
    rolling_mean = raw.rolling(window_size).mean()

    overrides = {
        "Circuits": {
            "LineLosses": {
                "store_values_type": "moving_average",
                "window_size": window_size,
            },
        }
    }
    run_project_with_custom_exports(path, "scenario1", sim_file, overrides)
    results = PyDssResults(path)
    assert len(results.scenarios) == 1
    scenario = results.scenarios[0]

    # Values stored as moving averages by the simulation itself.
    averaged = _get_dataframe(path, "Circuits", "LineLossesAvg", circuit,
                              real_only=True)
    assert len(averaged) == 96

    for expected_val, stored_val in zip(rolling_mean.iloc[:, 0].values,
                                        averaged.iloc[:, 0].values):
        if np.isnan(expected_val):
            assert np.isnan(stored_val)
        else:
            assert round(stored_val, 5) == round(expected_val, 5)
コード例 #10
0
ファイル: test_pydss_project.py プロジェクト: yuanzy97/PyDSS
def test_create_project(pydss_project):
    """Create a project with post-process configs and verify it round-trips."""
    project_name = "test-project"
    project_dir = os.path.join(PATH, project_name)
    thermal_upgrade = {
        "script": "AutomatedThermalUpgrade",
        "config_file": THERMAL_CONFIG,
    }
    voltage_upgrade = {
        "script": "AutomatedVoltageUpgrade",
        "config_file": VOLTAGE_CONFIG,
    }
    # Intentionally not in alphabetic order so that we verify our designated
    # ordering.
    scenarios = [
        PyDssScenario("b_scenario1", post_process_infos=[thermal_upgrade]),
        PyDssScenario("a_scenario2", post_process_infos=[voltage_upgrade]),
    ]
    project = PyDssProject.create_project(PATH, project_name, scenarios)
    assert os.path.exists(project_dir)
    for scenario in scenarios:
        for dir_name in PyDssScenario._SCENARIO_DIRECTORIES:
            assert os.path.exists(
                os.path.join(project_dir, SCENARIOS, scenario.name, dir_name)
            )

    reloaded = PyDssProject.load_project(project_dir)
    assert project.name == reloaded.name
    scenarios1 = project.scenarios
    scenarios1.sort(key=lambda s: s.name)
    scenarios2 = reloaded.scenarios
    scenarios2.sort(key=lambda s: s.name)

    assert len(scenarios1) == len(scenarios2)
    for original, restored in zip(scenarios1, scenarios2):
        assert original.name == restored.name
        assert original.controllers == restored.controllers
        assert original.post_process_infos == restored.post_process_infos
コード例 #11
0
    def __init__(self, project_path=None, project=None, in_memory=False,
                 frequency=False, mode=False):
        """Constructs PyDssResults object.

        Parameters
        ----------
        project_path : str | None
            Load project from files in path
        project : PyDssProject | None
            Existing project object
        in_memory : bool
            If true, load all exported data into memory.
        frequency : bool
            If true, add frequency column to all dataframes.
        mode : bool
            If true, add mode column to all dataframes.

        """
        options = ElementOptions()
        if project_path is not None:
            # TODO: handle old version?
            self._project = PyDssProject.load_project(
                project_path,
                simulation_file=RUN_SIMULATION_FILENAME,
            )
        else:
            if project is None:
                raise InvalidParameter("project_path or project must be set")
            self._project = project
        self._fs_intf = self._project.fs_interface
        self._scenarios = []
        store_filename = self._project.get_hdf_store_filename()
        # The "core" driver keeps the whole HDF5 file in memory.
        self._hdf_store = h5py.File(
            store_filename, "r", driver="core" if in_memory else None
        )

        if not self._project.simulation_config.exports.export_results:
            return
        for scenario_name in self._project.list_scenario_names():
            metadata = self._project.read_scenario_export_metadata(scenario_name)
            self._scenarios.append(
                PyDssScenarioResults(
                    scenario_name,
                    self.project_path,
                    self._hdf_store,
                    self._fs_intf,
                    metadata,
                    options,
                    frequency=frequency,
                    mode=mode,
                )
            )
コード例 #12
0
ファイル: test_pv_reports.py プロジェクト: NREL/PyDSS
def test_pv_reports_per_element_per_time_point(cleanup_project):
    # Generates reports from data stored at every time point and then
    # use those to compare with the in-memory metrics.
    PyDssProject.run_project(
        PV_REPORTS_PROJECT_STORE_ALL_PATH,
        simulation_file=SIMULATION_SETTINGS_FILENAME,
    )

    reports_dir = Path(PV_REPORTS_PROJECT_STORE_ALL_PATH) / "Reports"
    baseline_thermal = SimulationThermalMetricsModel(
        **load_data(reports_dir / "thermal_metrics.json"))
    baseline_voltage = SimulationVoltageMetricsModel(
        **load_data(reports_dir / "voltage_metrics.json"))
    baseline_feeder_losses = SimulationFeederLossesMetricsModel(
        **load_data(reports_dir / "feeder_losses.json"))

    for granularity in ReportGranularity:
        settings = load_data(BASE_FILENAME)
        settings["Reports"]["Granularity"] = granularity.value
        dump_data(settings, TEST_FILENAME)
        try:
            PyDssProject.run_project(
                PV_REPORTS_PROJECT_PATH,
                simulation_file=TEST_SIM_BASE_NAME,
            )
            if granularity == ReportGranularity.PER_ELEMENT_PER_TIME_POINT:
                verify_skip_night()
                assert verify_thermal_metrics(baseline_thermal)
                assert verify_voltage_metrics(baseline_voltage)
                assert verify_feeder_losses(baseline_feeder_losses)
            verify_pv_reports(granularity)
            verify_feeder_head_metrics()
        finally:
            # Remove the temporary settings file and any leftover artifacts.
            os.remove(TEST_FILENAME)
            for artifact in ARTIFACTS:
                if os.path.exists(artifact):
                    os.remove(artifact)
コード例 #13
0
def test_export_moving_averages(cleanup_project):
    """Compare stored window averages with a rolling mean over the raw data."""
    path = CUSTOM_EXPORTS_PROJECT_PATH
    sim_file = SIMULATION_SETTINGS_FILENAME
    circuit = "Circuit.heco19021"
    window_size = 10
    PyDssProject.run_project(path, simulation_file=sim_file)

    # Raw values recorded at every time point.
    raw = _get_dataframe(path, "Circuits", "LineLosses", circuit)
    assert len(raw) == 96
    rolling_mean = raw.rolling(window_size).mean()

    overrides = {
        "Circuits": {
            "LineLosses": {
                "store_values_type": "moving_average",
                "window_size": window_size,
            },
        }
    }
    run_project_with_custom_exports(path, "scenario1", sim_file, overrides)
    results = PyDssResults(path)
    assert len(results.scenarios) == 1
    scenario = results.scenarios[0]

    # One stored average per completed window.
    averaged = _get_dataframe(path, "Circuits", "LineLossesAvg", circuit)
    assert len(averaged) == 9

    # The k-th stored average maps to raw index (k + 1) * window_size - 1.
    for avg_index in range(len(averaged)):
        raw_index = window_size * (avg_index + 1) - 1
        expected = round(rolling_mean.iloc[raw_index, 0], 5)
        stored = round(averaged.iloc[avg_index, 0], 5)
        assert expected == stored
コード例 #14
0
def create_project(path=None, project=None, scenarios=None, simulation_file=None, simulation_config=None,
                   controller_types=None, export_modes=None, options=None, visualization_types=None,
                   opendss_project_folder=None, master_dss_file=None, force=False):
    """Create PyDSS project."""
    setup_logging("PyDSS", console_level=logging.INFO)

    # Comma-separated CLI strings become lists of enum values.
    if controller_types is not None:
        controller_types = [ControllerType(item) for item in controller_types.split(",")]
    if export_modes is not None:
        export_modes = [ExportMode(item) for item in export_modes.split(",")]
    if visualization_types is not None:
        visualization_types = [VisualizationType(item) for item in visualization_types.split(",")]

    if options is not None:
        # literal_eval safely parses the dict literal supplied on the CLI.
        options = ast.literal_eval(options)
        if not isinstance(options, dict):
            logger.error(f"options must be of type dict; received {type(options)}")
            sys.exit(1)

    scenario_objects = [
        PyDssScenario(
            name=raw_name.strip(),
            controller_types=controller_types,
            export_modes=export_modes,
            visualization_types=visualization_types,
        )
        for raw_name in scenarios.split(",")
    ]
    PyDssProject.create_project(
        path,
        project,
        scenario_objects,
        simulation_config,
        options=options,
        simulation_file=simulation_file,
        master_dss_file=master_dss_file,
        opendss_project_folder=opendss_project_folder,
        force=force,
    )
コード例 #15
0
ファイル: extract.py プロジェクト: yuanzy97/PyDSS
def extract_element_files(project_path, output_dir=None, verbose=False):
    """Extract the element info files from an archived PyDSS project.

    Parameters
    ----------
    project_path : str
        Path to the archived project.
    output_dir : str | None
        Destination directory; defaults to the project path itself.
    verbose : bool
        If true, enable DEBUG-level logging.

    """
    if not os.path.exists(project_path):
        print(f"project-path={project_path} does not exist")
        sys.exit(1)

    # Renamed from "filename" so the loop variable below does not shadow it.
    log_filename = "pydss_extract.log"
    console_level = logging.INFO
    file_level = logging.INFO
    if verbose:
        console_level = logging.DEBUG
        file_level = logging.DEBUG

    setup_logging(
        "PyDSS",
        filename=log_filename,
        console_level=console_level,
        file_level=file_level,
    )
    logger.info("CLI: [%s]", get_cli_string())

    project = PyDssProject.load_project(project_path)
    fs_intf = project.fs_interface
    results = PyDssResults(project_path)
    for scenario in results.scenarios:
        for filename in scenario.list_element_info_files():
            text = fs_intf.read_file(filename)

            if output_dir is None:
                path = os.path.join(project_path, filename)
            else:
                path = os.path.join(output_dir, filename)

            os.makedirs(os.path.dirname(path), exist_ok=True)
            with open(path, "w") as f_out:
                f_out.write(text)

            # Bug fix: report the extracted filename instead of the literal
            # placeholder "(unknown)" (matches the companion extract()).
            print(f"Extracted {filename} to {path}")
コード例 #16
0
ファイル: extract.py プロジェクト: yuanzy97/PyDSS
def extract(project_path, file_path, output_dir=None, verbose=False):
    """Extract a file from an archived PyDSS project."""
    if not os.path.exists(project_path):
        print(f"project-path={project_path} does not exist")
        sys.exit(1)

    log_level = logging.DEBUG if verbose else logging.INFO
    setup_logging(
        "PyDSS",
        filename="pydss_extract.log",
        console_level=log_level,
        file_level=log_level,
    )
    logger.info("CLI: [%s]", get_cli_string())

    project = PyDssProject.load_project(project_path)
    data = project.fs_interface.read_file(file_path)

    target_root = project_path if output_dir is None else output_dir
    path = os.path.join(target_root, file_path)

    os.makedirs(os.path.dirname(path), exist_ok=True)
    # Binary formats must be written without text encoding.
    ext = os.path.splitext(file_path)[1]
    mode = "wb" if ext in (".h5", ".feather") else "w"
    with open(path, mode) as f_out:
        f_out.write(data)

    print(f"Extracted {file_path} to {path}")
コード例 #17
0
ファイル: test_pydss_project.py プロジェクト: yuanzy97/PyDSS
def run_test_project_by_property(tar_project, zip_project):
    """Run the test project and validate per-property results and options."""
    project = PyDssProject.load_project(RUN_PROJECT_PATH)
    PyDssProject.run_project(
        RUN_PROJECT_PATH,
        tar_project=tar_project,
        zip_project=zip_project,
        simulation_file=SIMULATION_SETTINGS_FILENAME,
    )
    results = PyDssResults(RUN_PROJECT_PATH)
    assert len(results.scenarios) == 1
    assert results._hdf_store.attrs["version"] == DATA_FORMAT_VERSION
    scenario = results.scenarios[0]
    assert isinstance(scenario, PyDssScenarioResults)
    elem_classes = scenario.list_element_classes()
    expected_elem_classes = list(EXPECTED_ELEM_CLASSES_PROPERTIES.keys())
    expected_elem_classes.sort()
    assert elem_classes == expected_elem_classes
    for elem_class in elem_classes:
        expected_properties = EXPECTED_ELEM_CLASSES_PROPERTIES[elem_class]
        expected_properties.sort()
        properties = scenario.list_element_properties(elem_class)
        assert properties == expected_properties
        for prop in properties:
            element_names = scenario.list_element_names(elem_class, prop)
            for name in element_names:
                frame = scenario.get_dataframe(elem_class, prop, name)
                assert isinstance(frame, pd.DataFrame)
                assert len(frame) == 96
            for name, frame in scenario.iterate_dataframes(elem_class, prop):
                assert name in element_names
                assert isinstance(frame, pd.DataFrame)

    # Test with an option.
    option_names = scenario.list_element_property_options("Lines", "Currents")
    assert option_names == ["phase_terminal"]
    frame = scenario.get_dataframe(
        "Lines", "Currents", "Line.sw0", phase_terminal="A1")
    assert isinstance(frame, pd.DataFrame)
    assert len(frame) == 96
    assert len(frame.columns) == 1
    seconds = project.simulation_config["Project"]["Step resolution (sec)"]
    step = datetime.timedelta(seconds=seconds)
    assert frame.index[1] - frame.index[0] == step

    frame = scenario.get_dataframe(
        "Lines", "CurrentsMagAng", "Line.sw0",
        phase_terminal="A1", mag_ang="mag")
    assert isinstance(frame, pd.DataFrame)
    assert len(frame) == 96
    assert len(frame.columns) == 1

    frame = scenario.get_dataframe(
        "Lines", "CurrentsMagAng", "Line.sw0",
        phase_terminal=None, mag_ang="ang")
    assert isinstance(frame, pd.DataFrame)
    assert len(frame.columns) == 2
    assert len(frame) == 96

    regex = re.compile(r"[ABCN]1")
    frame = scenario.get_dataframe(
        "Lines", "Currents", "Line.sw0", phase_terminal=regex)
    assert isinstance(frame, pd.DataFrame)
    assert len(frame.columns) == 1
    assert len(frame) == 96

    option_values = scenario.get_option_values("Lines", "Currents", "Line.sw0")
    assert option_values == ["A1", "A2"]

    prop = "Currents"
    full_df = scenario.get_full_dataframe("Lines", prop)
    assert len(full_df.columns) >= len(scenario.list_element_names("Lines", prop))
    for column in full_df.columns:
        assert "Unnamed" not in column
    assert len(full_df) == 96

    element_info_files = scenario.list_element_info_files()
    assert element_info_files
    for info_file in element_info_files:
        frame = scenario.read_element_info_file(info_file)
        assert isinstance(frame, pd.DataFrame)

    # Test the shortcut.
    frame = scenario.read_element_info_file("PVSystems")
    assert isinstance(frame, pd.DataFrame)

    cap_changes = scenario.read_capacitor_changes()
コード例 #18
0
def test_custom_exports(cleanup_project):
    """Validate custom export behavior across storage types."""
    all_node_voltages = _get_all_node_voltages()

    PyDssProject.run_project(
        CUSTOM_EXPORTS_PROJECT_PATH,
        simulation_file=SIMULATION_SETTINGS_FILENAME,
    )
    results = PyDssResults(CUSTOM_EXPORTS_PROJECT_PATH)
    assert len(results.scenarios) == 1
    scenario = results.scenarios[0]

    # Property stored at all time points.
    frame = scenario.get_full_dataframe("Buses", "puVmagAngle")
    assert isinstance(frame, pd.DataFrame)
    assert len(frame) == 96

    # Property stored with a moving average.
    frame = scenario.get_dataframe("Buses", "DistanceAvg", "t9")
    assert isinstance(frame, pd.DataFrame)
    assert len(frame) == int(96)
    #assert len(df) == int(96 / 5)
    for value in frame.iloc[9:, 0]:
        assert round(value, 3) == 0.082

    # TODO DT: these values are no longer correct. What should they be?
    # Filtered value on custom function.
    #df = scenario.get_dataframe("Lines", "LoadingPercent", "Line.sl_22")
    #assert len(df) == 14

    #df = scenario.get_dataframe("Lines", "LoadingPercentAvg", "Line.sl_22")
    # This was computed from raw data.
    #assert len(df) == 9
    # TODO incorrect after more decimal points
    #assert round(df.iloc[:, 0].values[8], 2) == 22.79

    # Subset of names. VoltagesMagAng has specific names, CurrentsMagAng has regex
    for line_name in ("Line.pvl_110", "Line.pvl_111", "Line.pvl_112",
                      "Line.pvl_113"):
        line_props = scenario.list_element_properties(
            "Lines", element_name=line_name)
        assert "VoltagesMagAng" in line_props
        assert "CurrentsMagAng" in line_props

    line_props = scenario.list_element_properties(
        "Lines", element_name="Line.SL_14")
    assert "VoltagesMagAng" not in line_props
    assert "CurrentsMagAng" not in line_props

    # TODO: This metric no longer stores voltages in a dataframe.
    # That functionality could be recovered in PyDSS/metrics.py or we could implement this with
    # a different export property.
    #node_names = scenario.list_element_names("Nodes", "VoltageMetric")
    #dfs = scenario.get_filtered_dataframes("Nodes", "VoltageMetric")
    #assert len(node_names) == len(dfs)
    #assert sorted(node_names) == sorted(dfs.keys())
    #for i, node_name in enumerate(node_names):
    #    column = node_name + "__Voltage"
    #    df = dfs[node_name]
    #    # TODO: Slight rounding errors make this intermittent.
    #    #expected = all_node_voltages[column]
    #    #expected = expected[(expected < 1.02) | (expected > 1.04)]
    #    #assert len(df[column]) == len(expected)
    #    #assert_series_equal(df[column], expected, check_names=False)
    #    df2 = scenario.get_dataframe("Nodes", "VoltageMetric", node_name)
    #    assert_series_equal(df[column], df2[column], check_names=False)

    ## Two types of sums are stored.
    normal_amps_sum = scenario.get_element_property_value(
        "Lines", "NormalAmpsSum", "Line.pvl_110")
    assert normal_amps_sum == 96 * 65.0
    scenario.get_element_property_value("Lines", "CurrentsSum", "Line.pvl_110")
    scenario.get_element_property_value(
        "Circuits", "LossesSum", "Circuit.heco19021")

    sums_json = os.path.join(
        CUSTOM_EXPORTS_PROJECT_PATH, "Exports", "scenario1",
        "element_property_values.json")
    assert os.path.exists(sums_json)
    assert load_data(sums_json)

    pv_profiles = scenario.read_pv_profiles()
    assert pv_profiles["pv_systems"]
    for info in pv_profiles["pv_systems"]:
        assert isinstance(info["name"], str)
        assert isinstance(info["irradiance"], float)
        assert isinstance(info["pmpp"], float)
        assert isinstance(info["load_shape_profile"], str)
        assert isinstance(info["load_shape_pmult_sum"], float)
コード例 #19
0
def test_pv_powers_by_customer_type(cleanup_project):
    """Verify that PVSystem power output values collected by all variations match."""
    path = CUSTOM_EXPORTS_PROJECT_PATH
    PyDssProject.run_project(path,
                             simulation_file=SIMULATION_SETTINGS_FILENAME)
    com_pv_systems = {"pvgnem_mpx000635970", "pvgnem_mpx000460267"}
    res_pv_systems = {"pvgnem_mpx000594341", "pvgui_mpx000637601",
                      "pvgui_mpx000460267"}

    # Collect power for every PVSystem at every time point.
    df = _get_full_dataframe(path, "PVSystems", "Powers")
    com_cols, res_cols = _get_customer_type_columns(df, com_pv_systems,
                                                    res_pv_systems)
    com_total = df[com_cols].sum().sum()
    res_total = df[res_cols].sum().sum()
    overall_total = df.sum().sum()
    assert overall_total == com_total + res_total

    # Collect a running sum for all PVSystem power output.
    exports = {
        "PVSystems": {
            "Powers": {
                "store_values_type": "sum",
                "sum_elements": True,
            },
        }
    }
    run_project_with_custom_exports(path, "scenario1",
                                    SIMULATION_SETTINGS_FILENAME, exports)
    summed_total = sum(
        _get_summed_element_total(path, "PVSystems", "PowersSum").values())
    assert math.isclose(overall_total.real, summed_total.real)
    assert math.isclose(overall_total.imag, summed_total.imag)

    group_definitions = [
        {"name": "com", "elements": list(com_pv_systems)},
        {"name": "res", "elements": list(res_pv_systems)},
    ]

    # Collect power for PVSystems aggregated by customer type at every time point.
    exports = {
        "PVSystems": {
            "Powers": {
                "store_values_type": "all",
                "sum_groups": group_definitions,
            },
        }
    }
    run_project_with_custom_exports(path, "scenario1",
                                    SIMULATION_SETTINGS_FILENAME, exports)
    com_grouped = _get_summed_element_dataframe(
        path, "PVSystems", "Powers", group="com").sum().sum()
    res_grouped = _get_summed_element_dataframe(
        path, "PVSystems", "Powers", group="res").sum().sum()
    assert math.isclose(com_total.real, com_grouped.real)
    assert math.isclose(com_total.imag, com_grouped.imag)
    assert math.isclose(res_total.real, res_grouped.real)
    assert math.isclose(res_total.imag, res_grouped.imag)

    # Collect a running sum for all PVSystems by customer type.
    exports = {
        "PVSystems": {
            "Powers": {
                "store_values_type": "sum",
                "sum_groups": group_definitions,
            },
        }
    }
    run_project_with_custom_exports(path, "scenario1",
                                    SIMULATION_SETTINGS_FILENAME, exports)
    com_running = sum(
        _get_summed_element_total(path, "PVSystems", "PowersSum",
                                  group="com").values())
    res_running = sum(
        _get_summed_element_total(path, "PVSystems", "PowersSum",
                                  group="res").values())
    assert math.isclose(com_total.real, com_running.real)
    assert math.isclose(com_total.imag, com_running.imag)
    assert math.isclose(res_total.real, res_running.real)
    assert math.isclose(res_total.imag, res_running.imag)
コード例 #20
0
def test_custom_exports(cleanup_project):
    """Run the custom-exports project and verify every export flavor."""
    PyDssProject.run_project(
        CUSTOM_EXPORTS_PROJECT_PATH,
        simulation_file=SIMULATION_SETTINGS_FILENAME,
    )
    results = PyDssResults(CUSTOM_EXPORTS_PROJECT_PATH)
    assert len(results.scenarios) == 1
    scenario = results.scenarios[0]

    # Property stored at all time points.
    full_df = scenario.get_full_dataframe("Buses", "puVmagAngle")
    assert isinstance(full_df, pd.DataFrame)
    assert len(full_df) == 96

    # Property stored with a moving average.
    avg_df = scenario.get_dataframe("Buses", "DistanceAvg", "t9")
    assert isinstance(avg_df, pd.DataFrame)
    assert len(avg_df) == 96 // 5
    for _, record in avg_df.iterrows():
        assert round(record["t9__DistanceAvg"], 3) == 0.082

    transformer_names = scenario.list_element_names("Transformers")
    xfmr_df = scenario.get_dataframe("Transformers", "CurrentsAvg",
                                     transformer_names[0])
    assert len(xfmr_df) < 96

    line_avg_df = scenario.get_dataframe("Lines", "LoadingPercentAvg",
                                         "Line.sl_22")
    assert len(line_avg_df) == 2

    # Filtered value on custom function.
    loading_df = scenario.get_dataframe("Lines", "LoadingPercent",
                                        "Line.sl_22")
    assert len(loading_df) == 17

    # Subset of names. VoltagesMagAng has specific names, CurrentsMagAng has regex
    pv_line_names = ("Line.pvl_110", "Line.pvl_111", "Line.pvl_112",
                     "Line.pvl_113")
    for line_name in pv_line_names:
        props = scenario.list_element_properties("Lines",
                                                 element_name=line_name)
        assert "VoltagesMagAng" in props
        assert "CurrentsMagAng" in props

    props = scenario.list_element_properties("Lines",
                                             element_name="Line.SL_14")
    assert "VoltagesMagAng" not in props
    assert "CurrentsMagAng" not in props

    # Two types of sums are stored.
    assert scenario.get_element_property_number(
        "Lines", "NormalAmpsSum", "Line.pvl_110") == 96 * 65.0
    scenario.get_element_property_number("Lines", "CurrentsSum",
                                         "Line.pvl_110")
    scenario.get_element_property_number("Circuits", "LossesSum",
                                         "Circuit.heco19021")

    sums_json = os.path.join(CUSTOM_EXPORTS_PROJECT_PATH, "Exports",
                             "scenario1", "element_property_numbers.json")
    assert os.path.exists(sums_json)
    assert load_data(sums_json)

    profiles = scenario.read_pv_profiles()
    assert profiles["pv_systems"]
    for entry in profiles["pv_systems"]:
        assert isinstance(entry["name"], str)
        assert isinstance(entry["irradiance"], float)
        assert isinstance(entry["pmpp"], float)
        assert isinstance(entry["load_shape_profile"], str)
        assert isinstance(entry["load_shape_pmult_sum"], float)
コード例 #21
0
def run(project_path, options=None, tar_project=False, zip_project=False, verbose=False, simulations_file=None, dry_run=False):
    """Run a PyDSS simulation.

    Parameters
    ----------
    project_path : str
        Path to an existing (untarred) PyDSS project directory.
    options : str | None
        String containing a Python dict literal of simulation option
        overrides, e.g. "{'key': 'value'}".
    tar_project : bool
        Tar the project directory after the run completes.
    zip_project : bool
        Zip the project directory after the run completes.
    verbose : bool
        Force DEBUG-level logging on both console and file handlers.
    simulations_file : str | None
        Simulation settings filename relative to the project.
    dry_run : bool
        Skip the simulation and print estimated export sizes instead.
    """
    if not os.path.exists(project_path):
        print(f"project-path={project_path} does not exist")
        sys.exit(1)

    config = PyDssProject.load_simulation_config(project_path, simulations_file)
    if verbose:
        # Override the config file.
        config["Logging"]["Logging Level"] = logging.DEBUG

    filename = None
    console_level = logging.INFO
    file_level = logging.INFO
    if not config["Logging"]["Display on screen"]:
        console_level = logging.ERROR
    if verbose:
        console_level = logging.DEBUG
        file_level = logging.DEBUG
    if config["Logging"]["Log to external file"]:
        logs_path = os.path.join(project_path, "Logs")
        # Fix: check the Logs directory only when file logging is enabled.
        # Previously logs_path was referenced unconditionally after this
        # branch, raising UnboundLocalError whenever external file logging
        # was disabled.
        if not os.path.exists(logs_path):
            print("Logs path does not exist. 'run' is not supported on a tarred project.")
            sys.exit(1)
        filename = os.path.join(
            logs_path,
            os.path.basename(project_path) + ".log",
        )

    setup_logging(
        "PyDSS",
        filename=filename,
        console_level=console_level,
        file_level=file_level,
    )
    logger.info("CLI: [%s]", get_cli_string())

    if options is not None:
        # Options arrive as a string from the CLI; parse it safely as a
        # Python literal rather than eval().
        options = ast.literal_eval(options)
        if not isinstance(options, dict):
            print(f"options must be of type dict; received {type(options)}")
            sys.exit(1)

    project = PyDssProject.load_project(project_path, options=options, simulation_file=simulations_file)
    project.run(tar_project=tar_project, zip_project=zip_project, dry_run=dry_run)

    if dry_run:
        print("=" * 30)
        # Width of the name column: the longest scenario name or the header,
        # whichever is wider. default=0 keeps max() from raising on an
        # empty estimated_space dict.
        maxlen = max((len(k) for k in project.estimated_space), default=0)
        maxlen = max(maxlen, len("ScenarioName"))
        template = "{:<{width}}   {}\n".format("ScenarioName", "EstimatedSpace", width=maxlen)

        total_size = 0
        for k, v in project.estimated_space.items():
            total_size += v
            vstr = make_human_readable_size(v)
            template += "{:<{width}} : {}\n".format(k, vstr, width=maxlen)
        print(template.strip())
        print("-" * 30)
        print(f"TotalSpace: {make_human_readable_size(total_size)}")
        print("=" * 30)
        print("Note: compression may reduce the size by ~90% depending on the data.")
コード例 #22
0
ファイル: handler.py プロジェクト: pk-organics/PyDSS
    async def get_pydss_project_info(self, request, path):
        """
        ---
        summary: Returns a dictionary of valid project and scenarios in the provided path
        tags:
         - PyDSS project
        parameters:
         - name: path
           in: query
           required: true
           schema:
              type: string
              example: C:/Users/alatif/Desktop/PyDSS_2.0/PyDSS/examples
        responses:
         '200':
           description: Successfully retrieved project information
           content:
              application/json:
                schema:
                    type: object
                examples:
                    get_instance_status:
                        value:
                            Status: 200
                            Message: PyDSS instance with the provided UUID is currently running
                            UUID: 96c21e00-cd3c-4943-a914-14451f5f7ab6
                            "Data": {'Project1' : {'Scenario1', 'Scenario2'}, 'Project2' : {'Scenario1'}}
         '406':
           description: Provided path does not exist
           content:
              application/json:
                schema:
                    type: object
                examples:
                    get_instance_status:
                        value:
                            Status: 406
                            Message: Provided path does not exist
                            UUID: None
        """
        logger.info(f"Exploring {path} for valid projects")

        # NOTE(review): the spec above documents 406 for a bad path, but the
        # code returns 404 in both failure branches — confirm the intended
        # status code before changing either side.
        if not os.path.exists(path):
            return web.json_response({
                "Status": 404,
                "Message": f"Provided path does not exist",
                "UUID": None
            })

        # Every immediate subdirectory is a candidate PyDSS project.
        subfolders = [f.path for f in os.scandir(path) if f.is_dir()]
        projects = {}
        for folder in subfolders:
            try:
                pydss_project = PyDssProject.load_project(folder)
                projects[pydss_project._name] = [x.name for x in pydss_project.scenarios]
            except Exception:
                # Not a loadable PyDSS project; skip it (best-effort scan).
                pass

        n = len(projects)
        if n > 0:
            return web.json_response({"Status": 200,
                                      "Message": f"{n} valid projects found",
                                      "UUID": None,
                                      "Data": projects})
        else:
            # Fix: the response was built but never returned, so the handler
            # implicitly returned None and aiohttp failed on the request.
            return web.json_response({"Status": 404,
                                      "Message": f"No valid PyDSS project in provided base path",
                                      "UUID": None})
コード例 #23
0
ファイル: run.py プロジェクト: NREL/PyDSS
def run(project_path,
        options=None,
        tar_project=False,
        zip_project=False,
        verbose=False,
        simulations_file=None,
        dry_run=False):
    """Run a PyDSS simulation.

    Parameters
    ----------
    project_path : str or pathlib.Path
        Path to the PyDSS project directory.
    options : str | None
        String containing a Python dict literal of simulation setting
        overrides.
    tar_project : bool
        Tar the project directory when the run completes.
    zip_project : bool
        Zip the project directory when the run completes.
    verbose : bool
        Force DEBUG-level logging on console and file handlers.
    simulations_file : str | None
        Simulation settings filename within the project.
    dry_run : bool
        Skip the simulation and log estimated export sizes instead.
    """
    project_path = Path(project_path)
    settings = PyDssProject.load_simulation_settings(project_path,
                                                     simulations_file)
    if verbose:
        # Override the config file.
        settings.logging.logging_level = logging.DEBUG

    filename = None
    console_level = logging.INFO
    file_level = logging.INFO
    if not settings.logging.enable_console:
        console_level = logging.ERROR
    if verbose:
        console_level = logging.DEBUG
        file_level = logging.DEBUG
    if settings.logging.enable_file:
        logs_path = project_path / "Logs"
        if not logs_path.exists():
            logger.error("Logs path %s does not exist", logs_path)
            sys.exit(1)
        filename = logs_path / "pydss.log"

    setup_logging(
        "PyDSS",
        filename=filename,
        console_level=console_level,
        file_level=file_level,
    )
    logger.info("CLI: [%s]", get_cli_string())

    if options is not None:
        # Options arrive as a CLI string; parse safely as a Python literal.
        options = ast.literal_eval(options)
        if not isinstance(options, dict):
            logger.error("options are invalid: %s", options)
            sys.exit(1)

    project = PyDssProject.load_project(project_path,
                                        options=options,
                                        simulation_file=simulations_file)
    project.run(tar_project=tar_project,
                zip_project=zip_project,
                dry_run=dry_run)

    if dry_run:
        # Fix: max() over an empty sequence raises ValueError; default=0
        # lets a project with no scenarios still produce a report.
        maxlen = max((len(k) for k in project.estimated_space), default=0)
        maxlen = max(maxlen, len("ScenarioName"))
        template = "{:<{width}}   {}\n".format("ScenarioName",
                                               "EstimatedSpace",
                                               width=maxlen)

        total_size = 0
        for k, v in project.estimated_space.items():
            total_size += v
            vstr = make_human_readable_size(v)
            template += "{:<{width}} : {}\n".format(k, vstr, width=maxlen)
        logger.info(template.strip())
        logger.info("-" * 30)
        # Lazy %-formatting so the size string is only built when the
        # INFO level is enabled.
        logger.info("TotalSpace: %s", make_human_readable_size(total_size))
        logger.info("=" * 30)
        logger.info(
            "Note: compression may reduce the size by ~90% depending on the data."
        )
コード例 #24
0
ファイル: handler.py プロジェクト: pk-organics/PyDSS
    async def post_pydss_create(self, request):
        """
        ---
        summary: Creates a new project for PyDSS (User uploads a zipped OpenDSS model)
        tags:
         - PyDSS project
        requestBody:
            content:
                multipart/form-data:
                    schema:
                      type: object
                      properties:
                        master_file:
                          type: string
                          example: Master_Spohn_existing_VV.dss
                        project:
                          type: string
                          example: test_project
                        scenarios:
                          type: string
                          description: comma separated list of PyDSS scenarios to be created
                          example: base_case,pv_scenario
                        controller_types:
                          type: string
                          description: comma separated list of PyDSS controller names
                          example: PvController,StorageController
                        visualization_types:
                          type: string
                          description: comma separated list of PyDSS plot names
                          example: Histogram,TimeSeries
                        fileName:
                          type: string
                          format: binary
        responses:
         '200':
           description: Successfully retrieved project information
           content:
              application/json:
                schema:
                    type: object
                examples:
                    get_instance_status:
                        value:
                            Status: 200
                            Message: PyDSS project created
                            UUID: None
         '403':
           description: Provided path does not exist
           content:
              application/json:
                schema:
                    type: object
                examples:
                    get_instance_status:
                        value:
                            Status: 403
                            Message: User does not have access to delete folders
                            UUID: None
        """

        # NOTE(review): the examples path is hard-coded to a developer
        # machine; presumably it should come from configuration — confirm
        # before deploying this handler anywhere else.
        from zipfile import ZipFile
        examples_path = os.path.join("C:/Users/alatif/Desktop/PyDSS_2.0/PyDSS/", 'examples')
        unzip_path = os.path.join(examples_path, "uploaded_opendss_project")
        zip_path = os.path.join(examples_path, "uploaded_opendss_project.zip")

        # Stream the raw request body to disk in 1 KiB chunks while also
        # accumulating the bytes in memory for form-field decoding below.
        data = None
        with open(zip_path, 'wb') as fd:
            while True:

                chunk = await request.content.read(1024)
                if data is None:
                    data = chunk
                else:
                    data += chunk
                if not chunk:
                    break
                fd.write(chunk)

        # bytestream_decode presumably parses the multipart payload into a
        # dict of form fields — TODO confirm against its definition.
        data = bytestream_decode(data)
        os.makedirs(unzip_path, exist_ok=True)
        # Extract the uploaded OpenDSS model archive for project creation.
        with ZipFile(zip_path, 'r') as zipObj:
            zipObj.extractall(path=unzip_path)

        # Comma-separated form fields map onto PyDSS enum members.
        controller_types = [ControllerType(x) for x in data['controller_types'].split(",")]
        visualization_types = [VisualizationType(x) for x in data['visualization_types'].split(",")]

        # One PyDssScenario per comma-separated name; all scenarios share
        # the same controller and visualization types.
        scenarios = [
            PyDssScenario(
                name=x.strip(),
                controller_types=controller_types,
                visualization_types=visualization_types,
            ) for x in data['scenarios'].split(",")
        ]

        PyDssProject.create_project(path=examples_path, name=data['project'], scenarios=scenarios,
                                    opendss_project_folder=unzip_path, master_dss_file=data['master_file'])

        # Best-effort cleanup of the uploaded archive and extraction dir;
        # any failure is reported to the client as a 403.
        try:
            shutil.rmtree(unzip_path)
            if os.path.exists(zip_path):
                os.remove(zip_path)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit; narrowing to OSError would be safer.
            return web.json_response({
                'Status': 403,
                'Message': 'User does not have access to delete folders',
                'UUID': None
            })

        result = {'Status': 200,
                  'Message': 'PyDSS project created',
                  'UUID': None}

        # name, scenarios, simulation_config = None, options = None,
        # simulation_file = SIMULATION_SETTINGS_FILENAME, opendss_project_folder = None,
        # master_dss_file = OPENDSS_MASTER_FILENAME

        return web.json_response(result)