# Example 1
def test_add_info_flows_storage_capacity():
    """For a STORAGE_CAPACITY asset, `add_info_flows` must add all aggregated
    flow parameters, but with value None and unit "NaN" (only the raw FLOW
    series is kept)."""
    asset = {}
    flow = pd.Series([0, 2] * 13)
    E1.add_info_flows(evaluated_period=1,
                      dict_asset=asset,
                      flow=flow,
                      type=STORAGE_CAPACITY)
    expected_parameters = (FLOW, TOTAL_FLOW, ANNUAL_TOTAL_FLOW, PEAK_FLOW,
                           AVERAGE_FLOW)
    for parameter in expected_parameters:
        assert (
            parameter in asset
        ), f"Parameter {parameter} should have been added to the dict_asset."
        if parameter == FLOW:
            # The raw time series is stored unchanged.
            assert_series_equal(
                asset[FLOW].astype(np.int64),
                flow,
                check_names=False,
            )
            continue
        # Aggregated parameters are present but deliberately emptied out.
        assert (
            UNIT in asset[parameter]
        ), f"Parameter {parameter} should have been added to the dict_asset with an {VALUE}."
        assert (
            VALUE in asset[parameter]
        ), f"Parameter {parameter} should have been added to the dict_asset with an {VALUE}."
        assert (
            asset[parameter][VALUE] is None
        ), f"For {STORAGE_CAPACITY}, the parameter {parameter} should have 'None' as value. It is {asset[parameter][VALUE]}."
        assert (
            asset[parameter][UNIT] == "NaN"
        ), f"For {STORAGE_CAPACITY}, the parameter {parameter} should have 'NaN'  as unit. It is {asset[parameter][UNIT]}."
# Example 2
def test_translate_optimizeCap_from_boolean_to_yes_no():
    """`translate_optimizeCap_from_boolean_to_yes_no` maps False -> "No" and
    True -> "Yes"."""
    # Not optimized:
    optimize = E1.translate_optimizeCap_from_boolean_to_yes_no(False)
    assert (optimize == "No"
            ), "Without optimization, `no` should be returned but it is not."
    # Optimized: the original message wrongly said "Without optimization" here.
    optimize = E1.translate_optimizeCap_from_boolean_to_yes_no(True)
    assert (optimize == "Yes"
            ), "With optimization, `yes` should be returned but it is not."
def test_get_parameter_to_be_evaluated_from_oemof_results():
    """Influx-defined asset groups resolve to the input bus name, outflux-defined
    groups to the output bus name."""
    cases = (
        (E1.ASSET_GROUPS_DEFINED_BY_INFLUX, INPUT_BUS_NAME),
        (E1.ASSET_GROUPS_DEFINED_BY_OUTFLUX, OUTPUT_BUS_NAME),
    )
    for groups, expected in cases:
        for asset_group in groups:
            param = E1.get_parameter_to_be_evaluated_from_oemof_results(
                asset_group, asset_label="a_label")
            assert param == expected
def test_get_tuple_for_oemof_results():
    """Influx groups yield (bus, label) tuples, outflux groups (label, bus)."""
    asset_label = "a_label"
    bus = "a_bus"
    # Inflowing assets: the bus comes first in the oemof results key.
    for asset_group in E1.ASSET_GROUPS_DEFINED_BY_INFLUX:
        assert E1.get_tuple_for_oemof_results(
            asset_label, asset_group, bus) == (bus, asset_label)
    # Outflowing assets: the asset label comes first.
    for asset_group in E1.ASSET_GROUPS_DEFINED_BY_OUTFLUX:
        assert E1.get_tuple_for_oemof_results(
            asset_label, asset_group, bus) == (asset_label, bus)
# Example 5
def test_cut_below_micro_pd_Series_below_0_larger_threshold(caplog):
    """Series entries far below zero are left untouched, but a warning is
    logged that the value may be invalid."""
    value = pd.Series([0, -0.5 * E1.THRESHOLD, -1, 0])
    with caplog.at_level(logging.WARNING):
        result = E1.cut_below_micro(value=value, label="label")
    warning_snippet = "This is so far below 0, that the value is not changed"
    assert warning_snippet in caplog.text, f"One value in pd.Series is below 0 and larger then the threshold, but no warning is displayed that this value may be invalid."
    assert (result == value).all(), f"As value {value} is below 0 but larger then the threshold, its value should not be changed (but it is {result})."
# Example 6
 def test_get_timeseries_per_bus_two_timeseries_for_directly_connected_storage(
         self):
     """A storage directly connected to a bus should have time series for input and output power."""
     # Load pre-recorded oemof bus results from a pickle fixture file.
     with open(BUS_DATA_DUMP, "rb") as handle:
         bus_data = pickle.load(handle)
     # (('transformer_station_in', 'Electricity_bus'), 'flow')
     # Minimal simulation settings: only the time index is needed here.
     dict_values = {
         SIMULATION_SETTINGS: {
             TIME_INDEX: pd.date_range("2020-01-01", freq="H", periods=3)
         }
     }
     E1.get_timeseries_per_bus(dict_values=dict_values, bus_data=bus_data)
     # check updated dict_values
     df = dict_values[OPTIMIZED_FLOWS]["Electricity"]
     # Expected column labels for the storage's charging/discharging series.
     # NOTE(review): assumes the fixture contains a storage named "battery" — verify against BUS_DATA_DUMP.
     cols = [f"battery {i}" for i in [INPUT_POWER, OUTPUT_POWER]]
     assert {cols[0], cols[1]}.issubset(
         df.columns
     ), f"`E1.get_timeseries_per_bus()` should add input and output power time series of storage to `dict_values` also if it is connected directly to a bus."
# Example 7
def test_cut_below_micro_scalar_value_below_0_larger_threshold(caplog):
    """A scalar far below zero is not changed, but a warning is logged."""
    value = -1
    with caplog.at_level(logging.WARNING):
        result = E1.cut_below_micro(value=value, label="label")
    warning_snippet = "This is so far below 0, that the value is not changed"
    assert warning_snippet in caplog.text, f"The value {value} is below 0 and larger then the threshold, but no warning is displayed that this value may be invalid."
    assert result == value, f"As value {value} is below 0 but larger then the threshold, its value should not be changed (but it is {result})."
# Example 8
def test_cut_below_micro_scalar_value_below_0_smaller_threshold(caplog):
    """A scalar negative by less than the threshold is clipped to zero and a
    debug message is emitted."""
    value = -0.5 * E1.THRESHOLD
    with caplog.at_level(logging.DEBUG):
        result = E1.cut_below_micro(value=value, label="label")
    assert "Negative value (s)" in caplog.text, f"The value {value} is below 0 and below the threshold, but the log does not register a debug message for this."
    assert result == 0, f"As value {value} is below 0 but smaller then the threshold, its value should be changed to zero (but it is {result})."
# Example 9
def test_cut_below_micro_scalar_value_larger_0_smaller_threshold(caplog):
    """A positive scalar below the threshold is clipped to zero and a debug
    message is emitted."""
    value = 0.5 * E1.THRESHOLD
    with caplog.at_level(logging.DEBUG):
        result = E1.cut_below_micro(value=value, label="label")
    assert "The positive value" in caplog.text, f"The value {value} is larger 0 but below the threshold and should raise a debug message."
    assert result == 0, f"As value {value} positive but smaller then the threshold, its value should be changed to zero (but it is {result})."
# Example 10
def test_cut_below_micro_pd_Series_larger_0_smaller_threshold(caplog):
    """Sub-threshold positive series entries are clipped to zero with a debug
    message; entries above the threshold stay unchanged."""
    value = pd.Series([0, 0.5 * E1.THRESHOLD, 0, 1])
    expected = pd.Series([0, 0, 0, 1])
    with caplog.at_level(logging.DEBUG):
        result = E1.cut_below_micro(value=value, label="label")
    assert " positive values smaller then the threshold" in caplog.text, f"One value in pd.Series is above 0 and below the threshold, but the log does not register a debug message for this."
    assert result[1] == 0, f"As value {value[1]} is below 0 but smaller then the threshold, its value should be changed to zero (but it is {result[1]})."
    assert (result == expected).all(), f"One value in pd.Series is below 0 but smaller then the threshold, its value should be changed to zero (but it is {result})."
# Example 11
def test_add_info_flows_365_days():
    """With a full year evaluated, `add_info_flows` stores the raw flow plus
    total, annual total, peak and average values computed from it."""
    asset = {}
    flow = pd.Series([0, 2] * 13)
    E1.add_info_flows(evaluated_period=365, dict_asset=asset, flow=flow)

    # All parameters must be present; the aggregated ones as {UNIT, VALUE} dicts.
    for parameter in (FLOW, TOTAL_FLOW, ANNUAL_TOTAL_FLOW, PEAK_FLOW,
                      AVERAGE_FLOW):
        assert (
            parameter in asset
        ), f"Parameter {parameter} should have been added to the dict_asset."
        if parameter == FLOW:
            continue
        assert (
            UNIT in asset[parameter]
        ), f"Parameter {parameter} should have been added to the dict_asset with an {VALUE}."
        assert (
            VALUE in asset[parameter]
        ), f"Parameter {parameter} should have been added to the dict_asset with an {VALUE}."

    # The raw flow is stored unchanged.
    assert_series_equal(
        asset[FLOW].astype(np.int64),
        flow,
        check_names=False,
    )
    # Over a full year the annual total equals the plain total.
    assert asset[TOTAL_FLOW][VALUE] == sum(flow), f"The {TOTAL_FLOW} should be {sum(flow)}, but is {asset[TOTAL_FLOW][VALUE]}"
    assert asset[ANNUAL_TOTAL_FLOW][VALUE] == sum(flow), f"The {ANNUAL_TOTAL_FLOW} should be {sum(flow)}, but is {asset[ANNUAL_TOTAL_FLOW][VALUE]}"
    assert asset[PEAK_FLOW][VALUE] == max(flow), f"The {PEAK_FLOW} should be {max(flow)}, but is {asset[PEAK_FLOW][VALUE]}"
    assert asset[AVERAGE_FLOW][VALUE] == flow.mean(), f"The {AVERAGE_FLOW} should be {flow.mean()}, but is {asset[AVERAGE_FLOW][VALUE]}"
# Example 12
def test_get_state_of_charge_info():
    """SOC time series and average SOC are derived from the storage flow
    divided by the total (installed + optimized additional) capacity."""
    flow = pd.Series([0, 2, 0, 2, 0, 2])
    # Total capacity = installed (1) + optimized additional (1) = 2, so SOC = flow / 2.
    storage_dict = {
        STORAGE_CAPACITY: {
            FLOW: flow,
            INSTALLED_CAP: {VALUE: 1},
            OPTIMIZED_ADD_CAP: {VALUE: 1},
        }
    }
    E1.get_state_of_charge_info(storage_dict)

    assert TIMESERIES_SOC in storage_dict, f"Parameter {TIMESERIES_SOC} should be added to the dict."
    expected_soc = pd.Series([0, 1, 0, 1, 0, 1])
    assert_series_equal(
        storage_dict[TIMESERIES_SOC].astype(np.int64),
        expected_soc,
        check_names=False,
    )

    assert AVERAGE_SOC in storage_dict, f"Parameter {AVERAGE_SOC} should be added to the dict."
    assert VALUE in storage_dict[AVERAGE_SOC], f"Parameter {AVERAGE_SOC} should be added to the dict with a {VALUE}."
    assert UNIT in storage_dict[AVERAGE_SOC], f"Parameter {AVERAGE_SOC} should be added to the dict with a {UNIT}."
    assert storage_dict[AVERAGE_SOC][VALUE] == 0.5, f"Parameter {AVERAGE_SOC} should have {VALUE} 0.5 but has {storage_dict[AVERAGE_SOC][VALUE] }."
    assert storage_dict[AVERAGE_SOC][UNIT] == "factor", f"Parameter {AVERAGE_SOC} should have {UNIT} 'factor' but has {storage_dict[AVERAGE_SOC][UNIT]}"
def evaluate_dict(dict_values, results_main, results_meta):
    """
    Post-process the oemof optimization results: evaluate flows, capacities,
    costs and KPI for every asset and write them back into ``dict_values``.

    Parameters
    ----------
    dict_values: dict
        simulation parameters
    results_main: DataFrame
        oemof simulation results as output by processing.results()
    results_meta: DataFrame
        oemof simulation meta information as output by processing.meta_results()
        (not used directly in this function body — TODO confirm whether it is
        needed downstream or can be dropped)

    Returns
    -------
    None
        ``dict_values`` is updated in place.
    """

    # Prepare empty KPI containers that the evaluation steps below fill in.
    dict_values.update(
        {
            KPI: {
                KPI_COST_MATRIX: pd.DataFrame(columns=KPI_COST_MATRIX_ENTRIES),
                KPI_SCALAR_MATRIX: pd.DataFrame(columns=KPI_SCALAR_MATRIX_ENTRIES),
                KPI_SCALARS_DICT: {},
            }
        }
    )

    bus_data = {}
    # Store all information related to busses in bus_data
    for bus in dict_values[ENERGY_BUSSES]:
        # Read all energy flows from busses
        bus_data.update({bus: solph.views.node(results_main, bus)})

    logging.info("Evaluating optimized capacities and dispatch.")
    # Evaluate timeseries and store to a large DataFrame for each bus:
    E1.get_timeseries_per_bus(dict_values, bus_data)

    # Store all information related to storages in bus_data, as storage capacity acts as a bus
    for storage in dict_values[ENERGY_STORAGE]:
        bus_data.update(
            {
                dict_values[ENERGY_STORAGE][storage][LABEL]: solph.views.node(
                    results_main, dict_values[ENERGY_STORAGE][storage][LABEL],
                )
            }
        )
        E1.get_storage_results(
            dict_values[SIMULATION_SETTINGS],
            bus_data[dict_values[ENERGY_STORAGE][storage][LABEL]],
            dict_values[ENERGY_STORAGE][storage],
        )

        # Costs are computed per storage sub-asset (capacity, charge, discharge).
        for storage_item in [STORAGE_CAPACITY, INPUT_POWER, OUTPUT_POWER]:
            E2.get_costs(
                dict_values[ENERGY_STORAGE][storage][storage_item],
                dict_values[ECONOMIC_DATA],
            )

        E2.lcoe_assets(dict_values[ENERGY_STORAGE][storage], ENERGY_STORAGE)
        # lcoe_assets must run before the results are folded into the KPI matrices.
        for storage_item in [STORAGE_CAPACITY, INPUT_POWER, OUTPUT_POWER]:
            store_result_matrix(
                dict_values[KPI], dict_values[ENERGY_STORAGE][storage][storage_item]
            )

        # If the storage is connected to an evaluated bus, also publish its SOC
        # time series on that bus under a label including the optimized capacity.
        if (
            dict_values[ENERGY_STORAGE][storage][INPUT_BUS_NAME]
            in dict_values[OPTIMIZED_FLOWS].keys()
        ) or (
            dict_values[ENERGY_STORAGE][storage][OUTPUT_BUS_NAME]
            in dict_values[OPTIMIZED_FLOWS].keys()
        ):
            # NOTE(review): the input bus name is used even when only the output
            # bus matched the condition above — confirm this is intended.
            bus_name = dict_values[ENERGY_STORAGE][storage][INPUT_BUS_NAME]
            timeseries_name = (
                dict_values[ENERGY_STORAGE][storage][LABEL]
                + " ("
                + str(
                    round(
                        dict_values[ENERGY_STORAGE][storage][STORAGE_CAPACITY][
                            OPTIMIZED_ADD_CAP
                        ][VALUE],
                        1,
                    )
                )
                + dict_values[ENERGY_STORAGE][storage][STORAGE_CAPACITY][
                    OPTIMIZED_ADD_CAP
                ][UNIT]
                + ") SOC"
            )

            # NOTE(review): literal key "timeseries_soc" — presumably equal to the
            # TIMESERIES_SOC constant used elsewhere in this file; verify.
            dict_values[OPTIMIZED_FLOWS][bus_name][timeseries_name] = dict_values[
                ENERGY_STORAGE
            ][storage]["timeseries_soc"]

    # Evaluate all non-storage asset groups: flows, costs, LCOE, KPI matrices.
    for group in [ENERGY_CONVERSION, ENERGY_PRODUCTION, ENERGY_CONSUMPTION]:
        for asset in dict_values[group]:
            E1.get_results(
                settings=dict_values[SIMULATION_SETTINGS],
                bus_data=bus_data,
                dict_asset=dict_values[group][asset],
                asset_group=group,
            )
            E2.get_costs(dict_values[group][asset], dict_values[ECONOMIC_DATA])
            E2.lcoe_assets(dict_values[group][asset], group)
            store_result_matrix(dict_values[KPI], dict_values[group][asset])

    # System-level KPI — each E3 step reads results of the previous ones.
    logging.info("Evaluating key performance indicators of the system")
    E3.all_totals(dict_values)
    E3.total_demand_and_excess_each_sector(dict_values)
    E3.add_total_feedin_electricity_equivaluent(dict_values)
    E3.add_levelized_cost_of_energy_carriers(dict_values)
    E3.add_total_renewable_and_non_renewable_energy_origin(dict_values)
    E3.add_renewable_share_of_local_generation(dict_values)
    E3.add_renewable_factor(dict_values)
    # E3.add_degree_of_sector_coupling(dict_values) feature not finished
    E3.add_onsite_energy_fraction(dict_values)
    E3.add_onsite_energy_matching(dict_values)
    E3.add_degree_of_autonomy(dict_values)

    # Tests and checks
    logging.info("Running validity checks.")
    E4.minimal_renewable_share_test(dict_values)
    E4.detect_excessive_excess_generation_in_bus(dict_values)
# Example 14
def test_cut_below_micro_scalar_value_0():
    """A scalar zero passes through `cut_below_micro` unchanged."""
    value = 0
    result = E1.cut_below_micro(value=value, label="label")
    assert result == value, f"The value {value} is 0 and should not be changed (but it is {result})."
# Example 15
def test_cut_below_micro_scalar_value_larger_0():
    """A scalar clearly above the threshold passes through unchanged."""
    value = 1
    result = E1.cut_below_micro(value=value, label="label")
    assert result == value, f"The value {value} is larger 0 by more than the threshold and therefore should not be changed (but it is {result})."
# Example 16
def test_cut_below_micro_pd_Series_0():
    """A series containing zeros and values above the threshold is unchanged."""
    value = pd.Series([0, 0, 0, 1])
    result = E1.cut_below_micro(value=value, label="label")
    unchanged = (result == value).all()
    assert unchanged, f"One value in pd.Series is 0 and should not be changed (but it is {result})."
# Example 17
def test_convert_components_to_dataframe():
    """`convert_components_to_dataframe` should tabulate production, conversion
    and storage assets with their type, vector, unit, installed capacity and
    capacity-optimization flag."""
    pv, diesel, storage, generator = "PV", "diesel", "storage", "genset"

    def _production_asset():
        # Template for an optimized energy production (source) asset.
        return {
            OEMOF_ASSET_TYPE: OEMOF_SOURCE,
            ENERGY_VECTOR: "vector",
            UNIT: UNIT,
            INSTALLED_CAP: {VALUE: 1},
            OPTIMIZE_CAP: {VALUE: True},
        }

    def _storage_subasset(label):
        # Template for a storage sub-asset (capacity / input / output power).
        return {LABEL: label, INSTALLED_CAP: {VALUE: 1, UNIT: UNIT}}

    dict_components = {
        # 2 examples energy production assets, as this does not seem to work currently
        ENERGY_PRODUCTION: {
            pv: _production_asset(),
            diesel: _production_asset(),
        },
        # Example for energy conversion asset, not optimized
        ENERGY_CONVERSION: {
            generator: {
                OEMOF_ASSET_TYPE: OEMOF_TRANSFORMER,
                ENERGY_VECTOR: "vector",
                UNIT: UNIT,
                INSTALLED_CAP: {VALUE: 1},
                OPTIMIZE_CAP: {VALUE: False},
            }
        },
        # Example for energy storage asset
        ENERGY_STORAGE: {
            storage: {
                OPTIMIZE_CAP: {VALUE: True},
                OEMOF_ASSET_TYPE: OEMOF_GEN_STORAGE,
                ENERGY_VECTOR: "vector",
                INPUT_POWER: _storage_subasset(storage + INPUT_POWER),
                OUTPUT_POWER: _storage_subasset(storage + OUTPUT_POWER),
                STORAGE_CAPACITY: _storage_subasset(storage + STORAGE_CAPACITY),
            }
        },
    }

    df_comp = E1.convert_components_to_dataframe(dict_components)

    expected_columns = (
        "Type of Component",
        "Energy Vector",
        UNIT,
        "Installed Capacity",
        "Capacity optimization",
    )
    for parameter in expected_columns:
        assert (
            parameter in df_comp.columns
        ), f"Parameter {parameter} has not been added as a column to the table to be printed in the autoreport."

    expected_components = (
        pv,
        diesel,
        generator,
        storage + INPUT_POWER,
        storage + OUTPUT_POWER,
        storage + STORAGE_CAPACITY,
    )
    for component in expected_components:
        assert (
            component in df_comp["Component"].values
        ), f"Asset {component} is not included in the table to be printed in the autoreport."

    # Only the generator is not capacity-optimized; every other row must say "Yes".
    component_col = df_comp.columns.get_loc("Component")
    optimize_col = df_comp.columns.get_loc("Capacity optimization")
    for row in range(len(df_comp)):
        if df_comp.iloc[row, component_col] == generator:
            assert (
                df_comp.iloc[row, optimize_col] == "No"
            ), f"The {generator} is not being capacity optimized, so `Capacity optimization` should be `No`, which is not the case."
        else:
            assert (
                df_comp.iloc[row, optimize_col] == "Yes"
            ), f"The {df_comp.iloc[row,df_comp.columns.get_loc('Component')]} is being capacity optimized, so `Capacity optimization` should be `Yes`, which is not the case."
# Example 18
def test_cut_below_micro_pd_Series_larger_0():
    """A series entirely above the threshold passes through unchanged."""
    value = pd.Series([1, 2, 3, 4])
    result = E1.cut_below_micro(value=value, label="label")
    unchanged = (result == value).all()
    assert unchanged, f"All values in pd.Series are larger 0 by more than the threshold and therefore should not be changed (but it is {result})."