Example #1
    def run(self, logging_configured=True, tar_project=False, zip_project=False, dry_run=False):
        """Run all scenarios in the project."""
        if isinstance(self._fs_intf, PyDssArchiveFileInterfaceBase):
            raise InvalidConfiguration("cannot run from an archived project")
        if tar_project and zip_project:
            raise InvalidParameter("tar_project and zip_project cannot both be True")
        if self._simulation_config["Project"]["DSS File"] == "":
            raise InvalidConfiguration("a valid opendss file needs to be passed")

        inst = instance()
        self._simulation_config["Logging"]["Pre-configured logging"] = logging_configured

        if dry_run:
            store_filename = os.path.join(tempfile.gettempdir(), STORE_FILENAME)
        else:
            store_filename = os.path.join(self._project_dir, STORE_FILENAME)

        driver = None
        if self._simulation_config["Exports"].get("Export Data In Memory", True):
            driver = "core"
        with h5py.File(store_filename, mode="w", driver=driver) as hdf_store:
            self._hdf_store = hdf_store
            self._hdf_store.attrs["version"] = DATA_FORMAT_VERSION
            for scenario in self._scenarios:
                self._simulation_config["Project"]["Active Scenario"] = scenario.name
                inst.run(self._simulation_config, self, scenario, dry_run=dry_run)
                self._estimated_space[scenario.name] = inst.get_estimated_space()

        if not dry_run:
            results = None
            export_tables = self._simulation_config["Exports"].get(
                "Export Data Tables", False
            )
            generate_reports = self._simulation_config.get("Reports", False)
            if export_tables or generate_reports:
                # Hack. Have to import here. Need to re-organize to fix.
                from PyDSS.pydss_results import PyDssResults
                results = PyDssResults(self._project_dir)
                if export_tables:
                    for scenario in results.scenarios:
                        scenario.export_data()

                if generate_reports:
                    results.generate_reports()

        if tar_project:
            self._tar_project_files()
        elif zip_project:
            self._zip_project_files()

        if dry_run and os.path.exists(store_filename):
            os.remove(store_filename)
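
A minimal usage sketch for run() above. The module path PyDSS.pydss_project and the project directory are assumptions (only PyDssProject.load_project and the keyword arguments appear in these examples):

from PyDSS.pydss_project import PyDssProject  # assumed module path

# Load an existing project, run all scenarios, then tar the project files.
project = PyDssProject.load_project("/path/to/my_project")  # hypothetical path
project.run(tar_project=True)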
Example #2
def export(project_path,
           fmt="csv",
           compress=False,
           output_dir=None,
           verbose=False):
    """Export data from a PyDSS project."""
    if not os.path.exists(project_path):
        print(f"project-path={project_path} does not exist")
        sys.exit(1)

    filename = "pydss_export.log"
    console_level = logging.INFO
    file_level = logging.INFO
    if verbose:
        console_level = logging.DEBUG
        file_level = logging.DEBUG

    setup_logging(
        "PyDSS",
        filename=filename,
        console_level=console_level,
        file_level=file_level,
    )
    logger.info("CLI: [%s]", get_cli_string())

    results = PyDssResults(project_path)
    for scenario in results.scenarios:
        scenario.export_data(output_dir, fmt=fmt, compress=compress)
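
A usage sketch with a hypothetical project path; the keywords mirror the signature above, and output_dir=None leaves the destination to each scenario's export_data() default:

# Export every scenario's data as CSV with debug logging enabled.
export("/path/to/project", fmt="csv", compress=False, verbose=True)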
Example #3
def test_pv_reports(cleanup_project):
    PyDssProject.run_project(
        PV_REPORTS_PROJECT_PATH,
        simulation_file=SIMULATION_SETTINGS_FILENAME,
    )
    results = PyDssResults(PV_REPORTS_PROJECT_PATH)

    # This test data doesn't have changes for Capacitors or RegControls.
    capacitor_change_counts = results.read_report(
        "Capacitor State Change Counts")
    assert len(capacitor_change_counts["scenarios"]) == 2
    assert not capacitor_change_counts["scenarios"][1]["capacitors"]

    reg_control_change_counts = results.read_report(
        "RegControl Tap Number Change Counts")
    assert len(reg_control_change_counts["scenarios"]) == 2
    assert not reg_control_change_counts["scenarios"][1]["reg_controls"]

    pv_clipping = results.read_report("PV Clipping")
    assert len(pv_clipping["pv_systems"]) == 5
    for pv_system in pv_clipping["pv_systems"]:
        assert "pv_clipping" in pv_system

    pv_curtailment = results.read_report("PV Curtailment")
    assert isinstance(pv_curtailment, pd.DataFrame)
Example #4
File: data_viewer.py  Project: NREL/PyDSS
    def _on_load_project_click(self, _):
        path = self._project_path_text.value
        if path == "":
            print("Project Path cannot be empty.", file=sys.stderr)
            return

        self._results = PyDssResults(path)
        self._timestamps = self._results.scenarios[0].get_timestamps()
        self._start_time_text.value = str(self._timestamps.iloc[0])
        self._end_time_text.value = str(self._timestamps.iloc[-1])
        self._resolution_text.value = str(self._timestamps.iloc[1] -
                                          self._timestamps.iloc[0])
        self._enable_project_actions()
Example #5
def verify_skip_night():
    results = PyDssResults(PV_REPORTS_PROJECT_PATH)
    scenario = results.scenarios[0]
    df = scenario.get_full_dataframe("PVSystems", "Powers")
    # Any time before 6am or after 6pm should be excluded.
    # Some times in the middle of the day have convergence errors.
    for i in range(24):
        for val in df.iloc[i, :]:
            assert np.isnan(val)
    for i in range(24, 30):
        for val in df.iloc[i, :]:
            assert not np.isnan(val)
    for i in range(90, 96):
        for val in df.iloc[i, :]:
            assert np.isnan(val)
Example #6
def _get_all_node_voltages():
    data = {
        "Nodes": {
            "VoltageMetric": {
                "store_values_type": "all",
                "limits": [1.02, 1.04],
                "limits_b": [1.01, 1.05],
            },
        }
    }
    path = CUSTOM_EXPORTS_PROJECT_PATH
    sim_file = SIMULATION_SETTINGS_FILENAME
    run_project_with_custom_exports(path, "scenario1", sim_file, data)
    results = PyDssResults(path)
    assert len(results.scenarios) == 1
    scenario = results.scenarios[0]
    return scenario.get_full_dataframe("Nodes", "VoltageMetric")
Example #7
def verify_feeder_head_metrics():
    results = PyDssResults(PV_REPORTS_PROJECT_PATH)
    scenario = results.scenarios[0]
    df = scenario.get_full_dataframe("FeederHead", "load_kvar")
    assert len(df) == 96
    assert np.isnan(df.values[0])
    df = scenario.get_full_dataframe("FeederHead", "load_kw")
    assert len(df) == 96
    assert np.isnan(df.values[0])
    df = scenario.get_full_dataframe("FeederHead", "loading")
    assert len(df) == 96
    assert np.isnan(df.values[0])
    df = scenario.get_full_dataframe("FeederHead", "reverse_power_flow")
    assert len(df) == 96
    # TODO: figure out why this doesn't come back as NaN
    assert df.values[0] == -9999
Example #8
def test_export_moving_averages(cleanup_project):
    # Compares the moving-average storage/calculation with a rolling average
    # computed on a dataset that has every time point.
    path = CUSTOM_EXPORTS_PROJECT_PATH
    sim_file = SIMULATION_SETTINGS_FILENAME
    circuit = "Circuit.heco19021"
    window_size = 10
    PyDssProject.run_project(path, simulation_file=sim_file)

    # This DataFrame will have values at every time point.
    df1 = _get_dataframe(path,
                         "Circuits",
                         "LineLosses",
                         circuit,
                         real_only=True)
    assert len(df1) == 96
    df1_rm = df1.rolling(window_size).mean()

    data = {
        "Circuits": {
            "LineLosses": {
                "store_values_type": "moving_average",
                "window_size": window_size,
            },
        }
    }
    run_project_with_custom_exports(path, "scenario1", sim_file, data)
    results = PyDssResults(path)
    assert len(results.scenarios) == 1
    scenario = results.scenarios[0]

    # This DataFrame will have moving averages.
    df2 = _get_dataframe(path,
                         "Circuits",
                         "LineLossesAvg",
                         circuit,
                         real_only=True)
    assert len(df2) == 96

    for val1, val2 in zip(df1_rm.iloc[:, 0].values, df2.iloc[:, 0].values):
        if np.isnan(val1):
            assert np.isnan(val2)
        else:
            assert round(val2, 5) == round(val1, 5)
Example #9
File: extract.py  Project: yuanzy97/PyDSS
def extract_element_files(project_path, output_dir=None, verbose=False):
    """Extract the element info files from an archived PyDSS project."""
    if not os.path.exists(project_path):
        print(f"project-path={project_path} does not exist")
        sys.exit(1)

    filename = "pydss_extract.log"
    console_level = logging.INFO
    file_level = logging.INFO
    if verbose:
        console_level = logging.DEBUG
        file_level = logging.DEBUG

    setup_logging(
        "PyDSS",
        filename=filename,
        console_level=console_level,
        file_level=file_level,
    )
    logger.info("CLI: [%s]", get_cli_string())

    project = PyDssProject.load_project(project_path)
    fs_intf = project.fs_interface
    results = PyDssResults(project_path)
    for scenario in results.scenarios:
        for filename in scenario.list_element_info_files():
            text = fs_intf.read_file(filename)

            if output_dir is None:
                path = os.path.join(project_path, filename)
            else:
                path = os.path.join(output_dir, filename)

            os.makedirs(os.path.dirname(path), exist_ok=True)
            with open(path, "w") as f_out:
                f_out.write(text)

            print(f"Extracted {filename} to {path}")
Example #10
def test_export_moving_averages(cleanup_project):
    # Compares the moving-average storage/calculation with a rolling average
    # computed on a dataset that has every time point.
    path = CUSTOM_EXPORTS_PROJECT_PATH
    sim_file = SIMULATION_SETTINGS_FILENAME
    circuit = "Circuit.heco19021"
    window_size = 10
    PyDssProject.run_project(path, simulation_file=sim_file)

    # This DataFrame will have values at every time point.
    df1 = _get_dataframe(path, "Circuits", "LineLosses", circuit)
    assert len(df1) == 96
    df1_rm = df1.rolling(window_size).mean()

    data = {
        "Circuits": {
            "LineLosses": {
                "store_values_type": "moving_average",
                "window_size": window_size,
            },
        }
    }
    run_project_with_custom_exports(path, "scenario1", sim_file, data)
    results = PyDssResults(path)
    assert len(results.scenarios) == 1
    scenario = results.scenarios[0]

    # This DataFrame will have moving averages.
    df2 = _get_dataframe(path, "Circuits", "LineLossesAvg", circuit)
    assert len(df2) == 9

    df1_index = window_size - 1
    for df2_index in range(len(df2)):
        val1 = round(df1_rm.iloc[df1_index, 0], 5)
        val2 = round(df2.iloc[df2_index, 0], 5)
        assert val1 == val2
        df1_index += window_size
Example #11
def _get_data_common(method_name, path, *args, **kwargs):
    results = PyDssResults(path)
    assert len(results.scenarios) == 1
    scenario = results.scenarios[0]
    method = getattr(scenario, method_name)
    return method(*args, **kwargs)
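
This helper dispatches to any method on the single scenario's results object by name. A sketch of a call, reusing an element class and property from the other examples here:

# Equivalent to scenario.get_full_dataframe("Buses", "puVmagAngle").
df = _get_data_common("get_full_dataframe", CUSTOM_EXPORTS_PROJECT_PATH, "Buses", "puVmagAngle")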
Example #12
def test_custom_exports(cleanup_project):
    all_node_voltages = _get_all_node_voltages()

    PyDssProject.run_project(
        CUSTOM_EXPORTS_PROJECT_PATH,
        simulation_file=SIMULATION_SETTINGS_FILENAME,
    )
    results = PyDssResults(CUSTOM_EXPORTS_PROJECT_PATH)
    assert len(results.scenarios) == 1
    scenario = results.scenarios[0]

    # Property stored at all time points.
    df = scenario.get_full_dataframe("Buses", "puVmagAngle")
    assert isinstance(df, pd.DataFrame)
    assert len(df) == 96

    # Property stored with a moving average.
    df = scenario.get_dataframe("Buses", "DistanceAvg", "t9")
    assert isinstance(df, pd.DataFrame)
    assert len(df) == 96
    #assert len(df) == int(96 / 5)
    for val in df.iloc[9:, 0]:
        assert round(val, 3) == 0.082

    # TODO DT: these values are no longer correct. What should they be?
    # Filtered value on custom function.
    #df = scenario.get_dataframe("Lines", "LoadingPercent", "Line.sl_22")
    #assert len(df) == 14

    #df = scenario.get_dataframe("Lines", "LoadingPercentAvg", "Line.sl_22")
    # This was computed from raw data.
    #assert len(df) == 9
    # TODO incorrect after more decimal points
    #assert round(df.iloc[:, 0].values[8], 2) == 22.79

    # Subset of names. VoltagesMagAng has specific names, CurrentsMagAng has regex
    for name in ("Line.pvl_110", "Line.pvl_111", "Line.pvl_112",
                 "Line.pvl_113"):
        properties = scenario.list_element_properties("Lines",
                                                      element_name=name)
        assert "VoltagesMagAng" in properties
        assert "CurrentsMagAng" in properties

    properties = scenario.list_element_properties("Lines",
                                                  element_name="Line.SL_14")
    assert "VoltagesMagAng" not in properties
    assert "CurrentsMagAng" not in properties

    # TODO: This metric no longer stores voltages in a dataframe.
    # That functionality could be recovered in PyDSS/metrics.py or we could implement this with
    # a different export property.
    #node_names = scenario.list_element_names("Nodes", "VoltageMetric")
    #dfs = scenario.get_filtered_dataframes("Nodes", "VoltageMetric")
    #assert len(node_names) == len(dfs)
    #assert sorted(node_names) == sorted(dfs.keys())
    #for i, node_name in enumerate(node_names):
    #    column = node_name + "__Voltage"
    #    df = dfs[node_name]
    #    # TODO: Slight rounding errors make this intermittent.
    #    #expected = all_node_voltages[column]
    #    #expected = expected[(expected < 1.02) | (expected > 1.04)]
    #    #assert len(df[column]) == len(expected)
    #    #assert_series_equal(df[column], expected, check_names=False)
    #    df2 = scenario.get_dataframe("Nodes", "VoltageMetric", node_name)
    #    assert_series_equal(df[column], df2[column], check_names=False)

    # Two types of sums are stored.
    normal_amps_sum = scenario.get_element_property_value(
        "Lines", "NormalAmpsSum", "Line.pvl_110")
    assert normal_amps_sum == 96 * 65.0
    scenario.get_element_property_value("Lines", "CurrentsSum", "Line.pvl_110")
    scenario.get_element_property_value("Circuits", "LossesSum",
                                        "Circuit.heco19021")

    sums_json = os.path.join(CUSTOM_EXPORTS_PROJECT_PATH, "Exports",
                             "scenario1", "element_property_values.json")
    assert os.path.exists(sums_json)
    data = load_data(sums_json)
    assert data

    pv_profiles = scenario.read_pv_profiles()
    assert pv_profiles["pv_systems"]
    for info in pv_profiles["pv_systems"]:
        assert isinstance(info["name"], str)
        assert isinstance(info["irradiance"], float)
        assert isinstance(info["pmpp"], float)
        assert isinstance(info["load_shape_profile"], str)
        assert isinstance(info["load_shape_pmult_sum"], float)
Example #13
def run_test_project_by_property(tar_project, zip_project):
    project = PyDssProject.load_project(RUN_PROJECT_PATH)
    PyDssProject.run_project(
        RUN_PROJECT_PATH,
        tar_project=tar_project,
        zip_project=zip_project,
        simulation_file=SIMULATION_SETTINGS_FILENAME,
    )
    results = PyDssResults(RUN_PROJECT_PATH)
    assert len(results.scenarios) == 1
    assert results._hdf_store.attrs["version"] == DATA_FORMAT_VERSION
    scenario = results.scenarios[0]
    assert isinstance(scenario, PyDssScenarioResults)
    elem_classes = scenario.list_element_classes()
    expected_elem_classes = list(EXPECTED_ELEM_CLASSES_PROPERTIES.keys())
    expected_elem_classes.sort()
    assert elem_classes == expected_elem_classes
    for elem_class in elem_classes:
        expected_properties = EXPECTED_ELEM_CLASSES_PROPERTIES[elem_class]
        expected_properties.sort()
        properties = scenario.list_element_properties(elem_class)
        assert properties == expected_properties
        for prop in properties:
            element_names = scenario.list_element_names(elem_class, prop)
            for name in element_names:
                df = scenario.get_dataframe(elem_class, prop, name)
                assert isinstance(df, pd.DataFrame)
                assert len(df) == 96
            for name, df in scenario.iterate_dataframes(elem_class, prop):
                assert name in element_names
                assert isinstance(df, pd.DataFrame)

    # Test with an option.
    assert scenario.list_element_property_options(
        "Lines", "Currents") == ["phase_terminal"]
    df = scenario.get_dataframe("Lines",
                                "Currents",
                                "Line.sw0",
                                phase_terminal="A1")
    assert isinstance(df, pd.DataFrame)
    assert len(df) == 96
    assert len(df.columns) == 1
    step = datetime.timedelta(
        seconds=project.simulation_config["Project"]["Step resolution (sec)"])
    assert df.index[1] - df.index[0] == step

    df = scenario.get_dataframe("Lines",
                                "CurrentsMagAng",
                                "Line.sw0",
                                phase_terminal="A1",
                                mag_ang="mag")
    assert isinstance(df, pd.DataFrame)
    assert len(df) == 96
    assert len(df.columns) == 1

    df = scenario.get_dataframe("Lines",
                                "CurrentsMagAng",
                                "Line.sw0",
                                phase_terminal=None,
                                mag_ang="ang")
    assert isinstance(df, pd.DataFrame)
    assert len(df.columns) == 2
    assert len(df) == 96

    regex = re.compile(r"[ABCN]1")
    df = scenario.get_dataframe("Lines",
                                "Currents",
                                "Line.sw0",
                                phase_terminal=regex)
    assert isinstance(df, pd.DataFrame)
    assert len(df.columns) == 1
    assert len(df) == 96

    option_values = scenario.get_option_values("Lines", "Currents", "Line.sw0")
    assert option_values == ["A1", "A2"]

    prop = "Currents"
    full_df = scenario.get_full_dataframe("Lines", prop)
    assert len(full_df.columns) >= len(
        scenario.list_element_names("Lines", prop))
    for column in full_df.columns:
        assert "Unnamed" not in column
    assert len(full_df) == 96

    element_info_files = scenario.list_element_info_files()
    assert element_info_files
    for filename in element_info_files:
        df = scenario.read_element_info_file(filename)
        assert isinstance(df, pd.DataFrame)

    # Test the shortcut.
    df = scenario.read_element_info_file("PVSystems")
    assert isinstance(df, pd.DataFrame)

    cap_changes = scenario.read_capacitor_changes()
Example #14
def test_custom_exports(cleanup_project):
    PyDssProject.run_project(
        CUSTOM_EXPORTS_PROJECT_PATH,
        simulation_file=SIMULATION_SETTINGS_FILENAME,
    )
    results = PyDssResults(CUSTOM_EXPORTS_PROJECT_PATH)
    assert len(results.scenarios) == 1
    scenario = results.scenarios[0]

    # Property stored at all time points.
    df = scenario.get_full_dataframe("Buses", "puVmagAngle")
    assert isinstance(df, pd.DataFrame)
    assert len(df) == 96

    # Property stored with a moving average.
    df = scenario.get_dataframe("Buses", "DistanceAvg", "t9")
    assert isinstance(df, pd.DataFrame)
    assert len(df) == int(96 / 5)
    for i, row in df.iterrows():
        assert round(row["t9__DistanceAvg"], 3) == 0.082

    transformers = scenario.list_element_names("Transformers")
    df = scenario.get_dataframe("Transformers", "CurrentsAvg", transformers[0])
    assert len(df) < 96

    df = scenario.get_dataframe("Lines", "LoadingPercentAvg", "Line.sl_22")
    assert len(df) == 2

    # Filtered value on custom function.
    df = scenario.get_dataframe("Lines", "LoadingPercent", "Line.sl_22")
    assert len(df) == 17

    # Subset of names. VoltagesMagAng has specific names, CurrentsMagAng has regex
    for name in ("Line.pvl_110", "Line.pvl_111", "Line.pvl_112",
                 "Line.pvl_113"):
        properties = scenario.list_element_properties("Lines",
                                                      element_name=name)
        assert "VoltagesMagAng" in properties
        assert "CurrentsMagAng" in properties

    properties = scenario.list_element_properties("Lines",
                                                  element_name="Line.SL_14")
    assert "VoltagesMagAng" not in properties
    assert "CurrentsMagAng" not in properties

    # Two types of sums are stored.
    normal_amps_sum = scenario.get_element_property_number(
        "Lines", "NormalAmpsSum", "Line.pvl_110")
    assert normal_amps_sum == 96 * 65.0
    scenario.get_element_property_number("Lines", "CurrentsSum",
                                         "Line.pvl_110")
    scenario.get_element_property_number("Circuits", "LossesSum",
                                         "Circuit.heco19021")

    sums_json = os.path.join(CUSTOM_EXPORTS_PROJECT_PATH, "Exports",
                             "scenario1", "element_property_numbers.json")
    assert os.path.exists(sums_json)
    data = load_data(sums_json)
    assert data

    pv_profiles = scenario.read_pv_profiles()
    assert pv_profiles["pv_systems"]
    for info in pv_profiles["pv_systems"]:
        assert isinstance(info["name"], str)
        assert isinstance(info["irradiance"], float)
        assert isinstance(info["pmpp"], float)
        assert isinstance(info["load_shape_profile"], str)
        assert isinstance(info["load_shape_pmult_sum"], float)
Example #15
def _get_dataframe(path, elem_class, prop, name):
    results = PyDssResults(path)
    assert len(results.scenarios) == 1
    scenario = results.scenarios[0]
    return scenario.get_dataframe(elem_class, prop, name, real_only=True)
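
Example #10 calls this helper like so; judging by the name, real_only=True keeps just the real component of complex-valued properties:

df = _get_dataframe(CUSTOM_EXPORTS_PROJECT_PATH, "Circuits", "LineLosses", "Circuit.heco19021")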
Example #16
def verify_pv_reports(granularity):
    results = PyDssResults(PV_REPORTS_PROJECT_PATH)
    s_cm = results.scenarios[0]
    s_pf1 = results.scenarios[1]

    # This test data doesn't have changes for Capacitors or RegControls.
    capacitor_change_counts = results.read_report(
        "Capacitor State Change Counts")
    assert len(capacitor_change_counts["scenarios"]) == 2
    assert not capacitor_change_counts["scenarios"][1]["capacitors"]

    reg_control_change_counts = results.read_report(
        "RegControl Tap Number Change Counts")
    assert len(reg_control_change_counts["scenarios"]) == 2
    assert not reg_control_change_counts["scenarios"][1]["reg_controls"]

    if granularity in (
            ReportGranularity.PER_ELEMENT_PER_TIME_POINT,
            ReportGranularity.ALL_ELEMENTS_PER_TIME_POINT,
    ):
        clipping_name = os.path.join(PV_REPORTS_PROJECT_PATH, "Reports",
                                     "pv_clipping.h5")
        clipping = read_dataframe(clipping_name)
        curtailment_name = os.path.join(PV_REPORTS_PROJECT_PATH, "Reports",
                                        "pv_curtailment.h5")
        curtailment = read_dataframe(curtailment_name)
    else:
        clipping_name = os.path.join(PV_REPORTS_PROJECT_PATH, "Reports",
                                     "pv_clipping.json")
        clipping = load_data(clipping_name)
        curtailment_name = os.path.join(PV_REPORTS_PROJECT_PATH, "Reports",
                                        "pv_curtailment.json")
        curtailment = load_data(curtailment_name)

    total_cm_p1ulv53232_1_2_pv = 2237.4654
    total_cm_p1ulv57596_1_2_3_pv = 650.3959
    overall_total_cm = total_cm_p1ulv53232_1_2_pv + total_cm_p1ulv57596_1_2_3_pv
    total_pf1_p1ulv53232_1_2_pv = 2389.4002
    total_pf1_p1ulv57596_1_2_3_pv = 650.3996
    overall_total_pf1 = total_pf1_p1ulv53232_1_2_pv + total_pf1_p1ulv57596_1_2_3_pv
    if granularity == ReportGranularity.PER_ELEMENT_PER_TIME_POINT:
        df = s_cm.get_full_dataframe("PVSystems", "Powers")
        assert math.isclose(
            df["PVSystem.small_p1ulv53232_1_2_pv__Powers"].sum(),
            total_cm_p1ulv53232_1_2_pv,
            rel_tol=1e-04)
        assert math.isclose(
            df["PVSystem.small_p1ulv57596_1_2_3_pv__Powers"].sum(),
            total_cm_p1ulv57596_1_2_3_pv,
            rel_tol=1e-04)
        df = s_pf1.get_full_dataframe("PVSystems", "Powers")
        assert math.isclose(
            df["PVSystem.small_p1ulv53232_1_2_pv__Powers"].sum(),
            total_pf1_p1ulv53232_1_2_pv,
            rel_tol=1e-04)
        assert math.isclose(
            df["PVSystem.small_p1ulv57596_1_2_3_pv__Powers"].sum(),
            total_pf1_p1ulv57596_1_2_3_pv,
            rel_tol=1e-04)
    elif granularity == ReportGranularity.PER_ELEMENT_TOTAL:
        df = s_cm.get_full_dataframe("PVSystems", "PowersSum")
        assert math.isclose(
            df["PVSystem.small_p1ulv53232_1_2_pv__Powers"].values[0],
            total_cm_p1ulv53232_1_2_pv,
            rel_tol=1e-04)
        assert math.isclose(
            df["PVSystem.small_p1ulv57596_1_2_3_pv__Powers"].values[0],
            total_cm_p1ulv57596_1_2_3_pv,
            rel_tol=1e-04)
        df = s_pf1.get_full_dataframe("PVSystems", "PowersSum")
        assert math.isclose(
            df["PVSystem.small_p1ulv53232_1_2_pv__Powers"].values[0],
            total_pf1_p1ulv53232_1_2_pv,
            rel_tol=1e-04)
        assert math.isclose(
            df["PVSystem.small_p1ulv57596_1_2_3_pv__Powers"].values[0],
            total_pf1_p1ulv57596_1_2_3_pv,
            rel_tol=1e-04)
    elif granularity == ReportGranularity.ALL_ELEMENTS_TOTAL:
        assert math.isclose(s_cm.get_summed_element_total(
            "PVSystems", "PowersSum")['Total__Powers'],
                            overall_total_cm,
                            rel_tol=1e-04)
        assert math.isclose(s_pf1.get_summed_element_total(
            "PVSystems", "PowersSum")['Total__Powers'],
                            overall_total_pf1,
                            rel_tol=1e-04)
    elif granularity == ReportGranularity.ALL_ELEMENTS_PER_TIME_POINT:
        df = s_cm.get_summed_element_dataframe("PVSystems", "Powers")
        assert math.isclose(df["Total__Powers"].sum(),
                            overall_total_cm,
                            rel_tol=1e-04)
        df = s_pf1.get_summed_element_dataframe("PVSystems", "Powers")
        assert math.isclose(df["Total__Powers"].sum(),
                            overall_total_pf1,
                            rel_tol=1e-04)
Example #17
    def run(self,
            logging_configured=True,
            tar_project=False,
            zip_project=False,
            dry_run=False):
        """Run all scenarios in the project."""
        if isinstance(self._fs_intf, PyDssArchiveFileInterfaceBase):
            raise InvalidConfiguration("cannot run from an archived project")
        if tar_project and zip_project:
            raise InvalidParameter(
                "tar_project and zip_project cannot both be True")
        if self._settings.project.dss_file == "":
            raise InvalidConfiguration(
                "a valid opendss file needs to be passed")

        inst = instance()
        if not logging_configured:
            if self._settings.logging.enable_console:
                console_level = logging.INFO
            else:
                console_level = logging.ERROR
            if self._settings.logging.enable_file:
                filename = os.path.join(self._project_dir, "Logs", "pydss.log")
            else:
                filename = None
            file_level = logging.INFO
            setup_logging(
                "PyDSS",
                filename=filename,
                console_level=console_level,
                file_level=file_level,
            )
        if dry_run:
            store_filename = os.path.join(tempfile.gettempdir(),
                                          STORE_FILENAME)
        else:
            store_filename = os.path.join(self._project_dir, STORE_FILENAME)
            self._dump_simulation_settings()

        driver = None
        if self._settings.exports.export_data_in_memory:
            driver = "core"
        if os.path.exists(store_filename):
            os.remove(store_filename)

        try:
            # This ensures that all datasets are flushed and closed after each
            # scenario. If there is an unexpected crash in a later scenario then
            # the file will still be valid for completed scenarios.
            for scenario in self._scenarios:
                with h5py.File(store_filename, mode="a",
                               driver=driver) as hdf_store:
                    self._hdf_store = hdf_store
                    self._hdf_store.attrs["version"] = DATA_FORMAT_VERSION
                    self._settings.project.active_scenario = scenario.name
                    inst.run(self._settings, self, scenario, dry_run=dry_run)
                    self._estimated_space[
                        scenario.name] = inst.get_estimated_space()

            export_tables = self._settings.exports.export_data_tables
            generate_reports = bool(self._settings.reports)
            if not dry_run and (export_tables or generate_reports):
                # Hack. Have to import here. Need to re-organize to fix.
                from PyDSS.pydss_results import PyDssResults
                results = PyDssResults(self._project_dir)
                if export_tables:
                    for scenario in results.scenarios:
                        scenario.export_data()

                if generate_reports:
                    results.generate_reports()

        except Exception:
            logger.exception("Simulation failed")
            raise

        finally:
            logging.shutdown()
            if tar_project:
                self._tar_project_files()
            elif zip_project:
                self._zip_project_files()

            if dry_run and os.path.exists(store_filename):
                os.remove(store_filename)
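
Unlike Example #1, this version reopens the HDF5 store in append mode once per scenario, so each scenario's datasets are flushed and closed before the next one starts. A standalone sketch of that pattern, with hypothetical file and group names:

import h5py

for name in ("scenario1", "scenario2"):  # hypothetical scenario names
    # Reopening in "a" mode per unit of work flushes and closes everything
    # written so far; a crash in a later iteration leaves earlier,
    # already-closed data intact.
    with h5py.File("store.h5", mode="a") as hdf_store:
        hdf_store.create_group(name)
        # ... write this scenario's datasets here ...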