def test_benchmark_feedin_tariff_optimize_positive_value(self, margs):
    r"""
    Benchmark test for feed-in in a simple invest case with grid connected PV and positive feed-in tariff (earn by feed-in).
    """
    use_case = "Feedin_optimize"
    # run the full MVS simulation for this scenario
    main(
        overwrite=True,
        display_output="warning",
        path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
        input_type=CSV_EXT,
        path_output_folder=os.path.join(
            TEST_OUTPUT_PATH, use_case, "positive_value"
        ),
    )
    # collect the simulation results for this use case
    df_busses_flow, cost_matrix, scalar_matrix, scalars = self.get_results(
        os.path.join(use_case, "positive_value")
    )

    # With a high positive feed-in tariff and moderate specific PV costs the
    # optimizer should install the maximum PV capacity, feed into the grid
    # and leave no excess electricity.
    installed_pv = scalar_matrix[OPTIMIZED_ADD_CAP]["pv_plant_01"]
    assert (
        installed_pv == 5000
    ), f"At a high positive feed-in tariff and moderate specific PV costs the maximum PV capacity should be installed (5000 kWp), however {installed_pv} kWp is installed."
    total_excess = df_busses_flow[EXCESS_SINK_NAME].sum()
    assert (
        total_excess == 0
    ), f"When the feed-in tariff is positive there should be no electricity excess, however the sum of the excess time series is {total_excess}"
# Ejemplo n.º 2
# 0
    def renewable_factor_and_renewable_share_of_local_generation(self, margs):
        r"""
        Benchmark test that checks the calculation of
        * TOTAL_NON_RENEWABLE_GENERATION_IN_LES
        * TOTAL_RENEWABLE_GENERATION_IN_LES
        * TOTAL_NON_RENEWABLE_ENERGY_USE
        * TOTAL_RENEWABLE_ENERGY_USE
        * RENEWABLE_FACTOR
        * RENEWABLE_SHARE_OF_LOCAL_GENERATION
        For one sector, with only grid and PV present. Uses the simple scenarios for MVS testing as an input.
        """
        use_case = "AB_grid_PV"
        # run the full MVS simulation for this scenario
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )
        # load the simulation results to check RENEWABLE_FACTOR and
        # RENEWABLE_SHARE_OF_LOCAL_GENERATION
        results_file = os.path.join(
            TEST_OUTPUT_PATH, use_case, "json_with_results.json"
        )
        with open(results_file, "r") as results:
            data = json.load(results)

        # hoist the two KPI dicts used by every assertion below
        kpi_scalars = data[KPI][KPI_SCALARS_DICT]
        kpi_uncoupled = data[KPI][KPI_UNCOUPLED_DICT]
        # total flow of the PV plant, i.e. the local renewable generation
        total_res_local = data[ENERGY_PRODUCTION]["pv_plant_01"][TOTAL_FLOW][VALUE]
        # total demand in electricity equivalent
        total_demand = kpi_scalars[TOTAL_DEMAND + SUFFIX_ELECTRICITY_EQUIVALENT]

        assert (
            kpi_scalars[TOTAL_RENEWABLE_GENERATION_IN_LES] == total_res_local
        ), f"The total renewable generation is not equal to the generation of the PV system."
        assert (
            kpi_scalars[TOTAL_NON_RENEWABLE_GENERATION_IN_LES] == 0
        ), f"There is no local non-renewable generation asset, but there seems to be a non-renewable production."
        assert (
            kpi_scalars[TOTAL_RENEWABLE_ENERGY_USE] == total_res_local
        ), f"There is another renewable energy source apart from PV."
        assert (
            kpi_scalars[TOTAL_NON_RENEWABLE_ENERGY_USE]
            == total_demand - total_res_local
        ), "The non-renewable energy use was expected to be all grid supply, but this does not hold true."
        assert (
            kpi_scalars[RENEWABLE_FACTOR] == total_res_local / total_demand
        ), f"The {RENEWABLE_FACTOR} is not as expected."
        assert (
            kpi_uncoupled[RENEWABLE_FACTOR]["Electricity"]
            == total_res_local / total_demand
        ), f"The {RENEWABLE_FACTOR} is not as expected."
        assert (
            kpi_scalars[RENEWABLE_SHARE_OF_LOCAL_GENERATION] == 1
        ), f"The {RENEWABLE_SHARE_OF_LOCAL_GENERATION} is not as expected."
        assert (
            kpi_uncoupled[RENEWABLE_SHARE_OF_LOCAL_GENERATION]["Electricity"] == 1
        ), f"The {RENEWABLE_SHARE_OF_LOCAL_GENERATION} is not as expected."
# Ejemplo n.º 3
# 0
    def test_benchmark_AB_grid_pv(self, margs):
        r"""
        Benchmark test for simple case grid connected PV. Since the PV is already installed, this tests makes sure that the PV generation is totally used to supply the load and the rest in take from the grid.
        """
        use_case = "AB_grid_PV"
        # run the full MVS simulation for this scenario
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )

        # read the electricity bus time series from the simulation output
        df_busses_flow = pd.read_excel(
            os.path.join(TEST_OUTPUT_PATH, use_case,
                         "timeseries_all_busses.xlsx"),
            sheet_name=bus_suffix("Electricity"),
        )
        # make the time the index
        df_busses_flow = df_busses_flow.set_index("Unnamed: 0")
        # compute the sum of the in and out of the electricity bus
        df_busses_flow["net_sum"] = df_busses_flow.sum(axis=1)

        # make sure the sum of the bus flow is always zero (there are rounding errors)
        # Fix: compare the absolute value against the tolerance — the previous
        # `x < 1e-4` also accepted arbitrarily large negative imbalances.
        assert df_busses_flow.net_sum.map(
            lambda x: 0 if abs(x) < 1e-4 else 1
        ).sum() == 0
# Ejemplo n.º 4
# 0
    def test_benchmark_minimal_degree_of_autonomy_constraint(self, margs):
        r"""
        Notes
        -----
        With this benchmark test, the minimal degree of autonomy constraint is validated.
        constraint_degree_of_autonomy_0 does not have a minimal degree of autonomy.
        constraint_degree_of_autonomy_70 has a minimal degree of autonomy of 70%.
        If the degree of autonomy of constraint_degree_of_autonomy_0 is lower than 70%,
        but the one of constraint_degree_of_autonomy_70 is 70%, then the benchmark test passes.

        NOTE(review): this block appears truncated — ``minimal_degree_of_autonomy``
        is never populated and the comparison/assertion described above is
        missing; confirm against the original test suite.
        """

        # define the two cases needed for comparison (no minimal degree of
        # autonomy) and (minimal degree of autonomy of 70%)
        use_case = [
            "constraint_degree_of_autonomy_0",
            "constraint_degree_of_autonomy_70",
        ]
        # dictionaries collecting the (minimal) degree of autonomy per case
        degree_of_autonomy = {}
        minimal_degree_of_autonomy = {}
        for case in use_case:
            # run a full MVS simulation for each scenario
            main(
                overwrite=True,
                display_output="warning",
                path_input_folder=os.path.join(TEST_INPUT_PATH, case),
                input_type=CSV_EXT,
                path_output_folder=os.path.join(TEST_OUTPUT_PATH, case),
            )
            # read the resulting KPIs from the json results file
            data = load_json(
                os.path.join(TEST_OUTPUT_PATH, case,
                             JSON_WITH_RESULTS + JSON_FILE_EXTENSION))
            degree_of_autonomy.update(
                {case: data[KPI][KPI_SCALARS_DICT][DEGREE_OF_AUTONOMY]})
# Ejemplo n.º 5
# 0
    def test_benchmark_minimal_renewable_share_constraint(self, margs):
        r"""
        Notes
        -----
        With this benchmark test, the minimal renewable factor constraint is validated.
        Constraint_minimal_renewable_share_0 does not have a minimal renewable factor.
        Constraint_minimal_renewable_share_70 has a minimal renewable factor of 70%.
        If the renewable share of Constraint_minimal_renewable_share_0 is lower than 70%,
        but the one of Constraint_minimal_renewable_share_70 is 70%, then the benchmark test passes.

        NOTE(review): this block appears truncated — the final
        ``minimal_renewable_shares.update(`` call is cut off mid-statement and
        the comparison/assertion described above is missing; confirm against
        the original test suite.
        """

        # define the two cases needed for comparison (no minimal renewable factor) and (minimal renewable factor of 70%)
        use_case = [
            "Constraint_minimal_renewable_share_0",
            "Constraint_minimal_renewable_share_70",
        ]
        # dictionaries collecting the (minimal) renewable share per case
        renewable_shares = {}
        minimal_renewable_shares = {}
        for case in use_case:
            # run a full MVS simulation for each scenario
            main(
                overwrite=True,
                display_output="warning",
                path_input_folder=os.path.join(TEST_INPUT_PATH, case),
                input_type=CSV_EXT,
                path_output_folder=os.path.join(TEST_OUTPUT_PATH, case),
            )
            # read the resulting KPIs from the json results file
            data = load_json(
                os.path.join(TEST_OUTPUT_PATH, case,
                             JSON_WITH_RESULTS + JSON_FILE_EXTENSION),
                flag_missing_values=False,
            )
            renewable_shares.update(
                {case: data[KPI][KPI_SCALARS_DICT][RENEWABLE_FACTOR]})
            # truncated in SOURCE: the statement below is incomplete
            minimal_renewable_shares.update(
# Ejemplo n.º 6
# 0
    def test_benchmark_AD_grid_diesel(self, margs):
        r"""
        Benchmark test for using a diesel generator with the electricity grid. In this benchmark test, the LCOE of the diesel generator is made less than the grid price and so it is solely used to supply the load.
        """
        use_case = "AD_grid_diesel"
        # run the full MVS simulation for this scenario
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )

        # read json with results file
        data = load_json(
            os.path.join(TEST_OUTPUT_PATH, use_case,
                         JSON_WITH_RESULTS + JSON_FILE_EXTENSION),
            flag_missing_values=False,
        )

        # make sure LCOE_diesel is less than grid price, so that below test makes sense
        # (uses the ENERGY_PRICE constant instead of the literal "energy_price"
        # for consistency with the other benchmark tests in this file)
        assert (data[ENERGY_CONVERSION]["diesel_generator"][LCOE_ASSET][VALUE]
                < data[ENERGY_PROVIDERS]["DSO"][ENERGY_PRICE][VALUE])

        # make sure grid is not used, ie. that diesel generator supplies all demand
        diesel_generator = data[ENERGY_CONVERSION]["diesel_generator"][FLOW]
        demand = data[ENERGY_CONSUMPTION]["demand_01"][FLOW]
        assert sum(diesel_generator) == approx(sum(demand), rel=1e-3)
# Ejemplo n.º 7
# 0
 def test_benchmark_ABE_grid_pv_bat(self, margs):
     r"""
     Benchmark test for using a grid connected PV system with storage. In this case, the excess production should be used to charge the battery.
     """
     # define the two cases needed for comparison (grid + PV) and (grid + PV + battery)
     use_case = ["AB_grid_PV", "ABE_grid_PV_battery"]
     # define an empty dictionary for excess electricity
     excess = {}
     for case in use_case:
         # run a full MVS simulation for each scenario
         main(
             overwrite=True,
             display_output="warning",
             path_input_folder=os.path.join(TEST_INPUT_PATH, case),
             input_type=CSV_EXT,
             path_output_folder=os.path.join(TEST_OUTPUT_PATH, case),
         )
         busses_flow = pd.read_excel(
             os.path.join(TEST_OUTPUT_PATH, case,
                          "timeseries_all_busses.xlsx"),
             sheet_name="Electricity",
         )
         # compute the sum of the excess electricity for all timesteps
         excess[case] = sum(busses_flow["Electricity" + EXCESS_SINK])
     # compare the total excess electricity between the two cases:
     # with a battery the excess charges the storage, so the case WITHOUT the
     # battery must show more excess. The previous assertion used `<`, which
     # contradicts the scenario description in the docstring above.
     assert excess["AB_grid_PV"] > excess["ABE_grid_PV_battery"]
# Ejemplo n.º 8
# 0
def test_error_raise_MVSOemofError_if_solver_could_not_finish_simulation(
        margs):
    """Check that the MVS raises MVSOemofError for a known-unsolvable scenario."""
    # scenario with insufficient capacities, known to make oemof fail
    scenario = os.path.join(
        "test_data", "known_oemof_errors", "insufficient_capacity"
    )
    with pytest.raises(MVSOemofError):
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_REPO_PATH, scenario),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, scenario),
        )
    def test_benchmark_feedin_tariff_dispatch_negative_value(self, margs):
        r"""
        Benchmark test for feed-in in a simple dispatch case with grid connected PV and negative feed-in tariff (pay for feed-in).
        """
        use_case = "Feedin_dispatch"

        # set feed-in tariff to negative value
        filename = os.path.join(TEST_INPUT_PATH, use_case, CSV_ELEMENTS,
                                f"{ENERGY_PROVIDERS}.csv")
        df = pd.read_csv(filename).set_index("Unnamed: 0")
        # use .loc instead of chained indexing (`df["DSO"][FEEDIN_TARIFF]`),
        # which may silently write to a copy instead of the DataFrame
        df.loc[FEEDIN_TARIFF, "DSO"] = -float(df.loc[FEEDIN_TARIFF, "DSO"])
        df.to_csv(filename)

        try:
            main(
                overwrite=True,
                display_output="warning",
                path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
                input_type=CSV_EXT,
                path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case,
                                                "negative_value"),
            )
        finally:
            # reset the feed-in tariff even if the simulation fails, so the
            # shared input files are not left modified for other tests
            df.loc[FEEDIN_TARIFF, "DSO"] = -float(df.loc[FEEDIN_TARIFF, "DSO"])
            df.to_csv(filename)

        # collect the simulation results for this use case
        df_busses_flow, cost_matrix, scalar_matrix, scalars = self.get_results(
            os.path.join(use_case, "negative_value"))

        # at a negative feed-in tariff no feed into the grid should take place
        feedin = df_busses_flow[FEEDIN].sum()
        total_feedin_scalar = scalar_matrix[TOTAL_FLOW][FEEDIN]
        assert (
            feedin == 0
        ), f"When the feed-in tariff is negative there should be no feed into the grid, however the sum of the feed-in time series is {feedin}"

        assert (
            total_feedin_scalar == 0
        ), f"When the feed-in tariff is negative there should be no feed into the grid, however the scalar matrix shows feed-in of {total_feedin_scalar}"

        # costs of DSO feed-in sink in scalars.xlsx should be zero.
        assert (
            cost_matrix[COST_TOTAL][FEEDIN] == 0
            and cost_matrix[COST_OPERATIONAL_TOTAL][FEEDIN] == 0
            and cost_matrix[COST_DISPATCH][FEEDIN] == 0
            and cost_matrix[LCOE_ASSET][FEEDIN] == 0
        ), f"When the feed-in tariff is negative the costs of the feed-in should be zero, as no feed-in takes place (scalar_matrix: {COST_TOTAL}, {COST_OPERATIONAL_TOTAL}, {COST_DISPATCH}, {LCOE_ASSET})."
 def test_benchmark_AFG_grid_heatpump_heat(self, margs):
     r"""
     Benchmark test for a sector coupled energy system, including electricity and heat demand. A heat pump is used as a sector coupling asset. Both an electricity and heat DSO are present. The electricity tariff is defined as a time series. The heat pump is only used when its cost (energy_price/efficiency) is less than the heat DSO price.
     """
     use_case = "AFG_grid_heatpump_heat"
     # run the full MVS simulation for this scenario
     main(
         overwrite=True,
         display_output="warning",
         path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
         input_type=CSV_EXT,
         path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
     )
     # read json with results file
     data = load_json(
         os.path.join(
             TEST_OUTPUT_PATH, use_case, JSON_WITH_RESULTS + JSON_FILE_EXTENSION
         ),
         flag_missing_values=False,
     )
     # read excel sheet with the heat bus time series
     busses_flow = pd.read_excel(
         os.path.join(TEST_OUTPUT_PATH, use_case, "timeseries_all_busses.xlsx"),
         sheet_name="Heat",
     )
     # electricity price time series of the electricity DSO
     electricity_price = data[ENERGY_PROVIDERS]["Grid_DSO"][ENERGY_PRICE][
         VALUE
     ].values
     # loop-invariant lookups hoisted out of the per-timestep loop
     heatpump_efficiency = data[ENERGY_CONVERSION]["heat_pump"][EFFICIENCY][VALUE]
     heat_dso_price = data[ENERGY_PROVIDERS]["Heat_DSO"][ENERGY_PRICE][VALUE]
     # strings interpolated into the assertion message below
     cost_of_using_heatpump = "electricity_price[i] / data[ENERGY_CONVERSION]['heat_pump'][EFFICIENCY][VALUE] comp.data[ENERGY_PROVIDERS]['Heat_DSO'][ENERGY_PRICE][VALUE]"
     cost_of_using_heat_dso = (
         "data[ENERGY_PROVIDERS]['Heat_DSO'][ENERGY_PRICE][VALUE]"
     )
     # per timestep, the cheaper of (heat pump, heat DSO) must cover the demand
     for i, el_price in enumerate(electricity_price):
         heat_demand = abs(busses_flow["demand_heat"][i])
         if el_price / heatpump_efficiency > heat_dso_price:
             # heat pump is more expensive -> heat DSO supplies the demand
             assert busses_flow["Heat_DSO_consumption_period"][i] == approx(
                 heat_demand
             ), f"Even though the marginal costs to use the heat pump are higher than the heat DSO price with {cost_of_using_heatpump} comp. {cost_of_using_heat_dso}, the heat DSO is not solely used for energy supply."
         else:
             # heat pump is cheaper -> it supplies the demand
             assert busses_flow["heat_pump"][i] == approx(heat_demand)
    def test_benchmark_feedin_tariff_dispatch_positive_value(self, margs):
        r"""
        Benchmark test for feed-in in a simple dispatch case with grid connected PV and positive feed-in tariff (earn by feed-in).
        """
        use_case = "Feedin_dispatch"
        # run the full MVS simulation for this scenario
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case,
                                            "positive_value"),
        )

        # collect the simulation results for this use case
        df_busses_flow, cost_matrix, scalar_matrix, scalars = self.get_results(
            os.path.join(use_case, "positive_value"))

        # at a positive feed-in tariff all production exceeding the demand should be
        # fed into the grid --> excess is zero
        excess_sum = df_busses_flow[EXCESS_SINK_NAME].sum()
        total_excess_scalar = scalar_matrix[TOTAL_FLOW][EXCESS_SINK_NAME]
        assert (
            excess_sum == 0
        ), f"When the feed-in tariff is positive there should be no electricity excess, however the sum of the excess time series is {excess_sum}"

        assert (
            total_excess_scalar == 0
        ), f"When the feed-in tariff is positive there should be no electricity excess, however the scalar matrix shows an excess of {total_excess_scalar}"

        # costs of DSO feed-in sink in scalars.xlsx should be negative, while they
        # should be substracted from the summed-up costs of the whole system
        # negative costs in cost_matrix:
        assert (
            cost_matrix[COST_TOTAL][FEEDIN] < 0
            and cost_matrix[COST_OPERATIONAL_TOTAL][FEEDIN] < 0
            and cost_matrix[COST_DISPATCH][FEEDIN] < 0
            and cost_matrix[LCOE_ASSET][FEEDIN] < 0
        ), f"When the feed-in tariff is positive the costs of the feed-in should be negative (scalar_matrix: {COST_TOTAL}, {COST_OPERATIONAL_TOTAL}, {COST_DISPATCH}, {LCOE_ASSET})."
        # costs substracted from total costs:
        total_costs_feedin = cost_matrix[COST_TOTAL][FEEDIN]
        total_costs_consumption = cost_matrix[COST_TOTAL][CONSUMPTION]
        total_costs_all_assets = scalars.loc[COST_TOTAL][0]
        # use approx here: these are floats read back from xlsx, an exact `==`
        # comparison of a sum is prone to rounding-error flakiness
        assert total_costs_all_assets == approx(
            total_costs_feedin + total_costs_consumption
        ), f"When the feed-in tariff is positive the costs of the feed-in should be substracted from the total cost (scalars)."
# Ejemplo n.º 12
# 0
    def test_benchmark_maximum_emissions_constraint(self, margs):
        r"""
        Tests the maximum emissions constraint in a system with PV, DSO and a diesel generator.
        The system defined in `\Constraint_maximum_emissions_None` does not have maximum emissions constraint,
        while the system defined in `\Constraint_maximum_emissions_low` has a low maximum emissions constraint of 800 kgCO2eq/a.
        A third system, `\Constraint_maximum_emissions_low_grid_RE_100`, includes a renewable share in the grid of 100 %.

        The following checks are made:
        - total emissions of energy system <= maximum emissions constraint
            (for Constraint_maximum_emissions_low and Constraint_maximum_emissions_low_grid_RE_100)
        - total emissions of case without constraint > total emissions of case with constraint
        - specific emissions eleq of case without constraint > specific emissions eleq of case with constraint
        - optimized added pv capacity lower for case without constraint than for case with constraint
        - total flow of grid consumption higher for case with 100 % RE share in grid than for case with emissions from grid

        NOTE(review): this block appears truncated — only ``total_emissions`` is
        populated; the other dictionaries and the assertions listed above are
        missing; confirm against the original test suite.
        """
        # define the three cases needed for comparison
        use_case = [
            "Constraint_maximum_emissions_None",
            "Constraint_maximum_emissions_low",
            "Constraint_maximum_emissions_low_grid_RE_100",
        ]
        # dictionaries collecting per-case results for the checks above
        total_emissions = {}
        maximum_emissions = {}
        specific_emissions_eleq = {}
        pv_capacities = {}
        grid_total_flows = {}
        for case in use_case:
            # run a full MVS simulation for each scenario
            main(
                overwrite=True,
                display_output="warning",
                path_input_folder=os.path.join(TEST_INPUT_PATH, case),
                input_type=CSV_EXT,
                path_output_folder=os.path.join(TEST_OUTPUT_PATH, case),
            )
            # read the resulting KPIs from the json results file
            data = load_json(
                os.path.join(TEST_OUTPUT_PATH, case,
                             JSON_WITH_RESULTS + JSON_FILE_EXTENSION),
                flag_missing_values=False,
            )
            total_emissions.update(
                {case: data[KPI][KPI_SCALARS_DICT][TOTAL_EMISSIONS]})
# Ejemplo n.º 13
# 0
    def test_net_zero_energy_constraint(self, margs):
        r"""
        Notes
        -----
        With this benchmark test, the net zero energy (NZE) constraint is validated in a
        single sector LES and a sector coupled LES.
        Constraint_net_zero_energy_False contains a single sector LES without NZE constraint.
        Constraint_net_zero_energy_true contains a single sector LES with NZE constraint.
        Constraint_net_zero_energy_sector_coupled_False contains a sector-coupled LES without NZE constraint.
        Constraint_net_zero_energy_sector_coupled_true contains a sector-coupled LES with NZE constraint.
        The benchmark test passes if the degree of NZE of the defined energy systems
        without constraint is lower than one and if the degree of NZE of the energy
        systems with constraint equals one or is greater than one.
        For the sector-coupled energy system, instead of the degree of NZE the balance
        between grid feed-in and consumption is used for the assertion to avoid problems
        with energy weighting.

        NOTE(review): this block appears truncated — ``consumption_from_grid`` and
        ``feedin_to_grid`` are never populated and the assertions described above
        are missing; confirm against the original test suite.
        """
        # define the cases needed for comparison
        use_case = [
            "Constraint_net_zero_energy_False",
            "Constraint_net_zero_energy_true",
            "Constraint_net_zero_energy_sector_coupled_False",
            "Constraint_net_zero_energy_sector_coupled_true",
        ]
        # dictionaries collecting the degree of NZE and grid flows per case
        degree_of_nze = {}
        consumption_from_grid = {}
        feedin_to_grid = {}
        for case in use_case:
            # run a full MVS simulation for each scenario
            main(
                overwrite=True,
                display_output="warning",
                path_input_folder=os.path.join(TEST_INPUT_PATH, case),
                input_type=CSV_EXT,
                path_output_folder=os.path.join(TEST_OUTPUT_PATH, case),
            )
            # read the resulting KPIs from the json results file
            data = load_json(
                os.path.join(TEST_OUTPUT_PATH, case,
                             JSON_WITH_RESULTS + JSON_FILE_EXTENSION))
            degree_of_nze.update(
                {case: data[KPI][KPI_SCALARS_DICT][DEGREE_OF_NZE]})
    def test_benchmark_feature_parameters_as_timeseries(self, margs):
        r"""
        Notes
        -----
        This benchmark test checks if a scalar value can be provided as a timeseries within a csv file.
        It also checks whether these timeseries can be provided within a single csv file.
        """
        use_case = "Feature_parameters_as_timeseries"

        # Execute the script
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )

        # read json with results file
        # NOTE(review): unlike the other tests this path omits
        # JSON_FILE_EXTENSION — confirm this is intentional
        data = load_json(
            os.path.join(TEST_OUTPUT_PATH, use_case, JSON_WITH_RESULTS))

        # read csv with expected values of the timeseries
        csv_data = pd.read_csv(
            os.path.join(TEST_INPUT_PATH, use_case, TIME_SERIES,
                         "parameter_timeseries.csv"))

        # asset / column name constants
        diesel = "diesel_generator"
        dso = "DSO"
        diesel_efficiency = "diesel_efficiency"
        electricity_price = "electricity_price"

        # simulated time series that must match the csv input values
        simulated_efficiency = data[ENERGY_CONVERSION][diesel][EFFICIENCY][VALUE]
        simulated_price = data[ENERGY_PROVIDERS][dso][ENERGY_PRICE][VALUE]
        simulated_dispatch = data[ENERGY_PRODUCTION][
            dso + DSO_CONSUMPTION][DISPATCH_PRICE][VALUE]
        for idx in range(len(csv_data[diesel_efficiency])):
            assert simulated_efficiency[idx] == pytest.approx(
                csv_data[diesel_efficiency][idx], rel=1e-6)
            assert simulated_price[idx] == pytest.approx(
                csv_data[electricity_price][idx], rel=1e-6)
            assert simulated_dispatch[idx] == pytest.approx(
                csv_data[electricity_price][idx], rel=1e-6)
# Ejemplo n.º 15
# 0
def test_check_energy_system_can_fulfill_max_demand_fails_mvs_runthrough(
        caplog):
    """This test makes sure that the C1.check_energy_system_can_fulfill_max_demand not only works as a function, but as an integrated function of the MVS model, as it is dependent on a lot of pre-processing steps where things in the future may be changed."""
    TEST_INPUT_PATH = os.path.join(TEST_REPO_PATH, "benchmark_test_inputs")
    TEST_OUTPUT_PATH = os.path.join(TEST_REPO_PATH, "benchmark_test_outputs")
    # Start from a clean output folder so the logfile read below is
    # guaranteed to stem from this very run.
    if os.path.exists(TEST_OUTPUT_PATH):
        shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True)
    use_case = "validity_check_insufficient_capacities"
    # The scenario deliberately has insufficient capacities, so the solver
    # step is expected to fail with an MVSOemofError.
    with pytest.raises(MVSOemofError):
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )

    # Use a context manager so the log file handle is closed even if the
    # assertion below fails (the previous open/read/close leaked on failure).
    with open(os.path.join(TEST_OUTPUT_PATH, use_case, LOGFILE), "r") as logfile:
        log = logfile.read()
    assert "might have insufficient capacities" in log
    def test_benchmark_AE_grid_battery(self, margs):
        r"""
        Benchmark test for simple case grid and battery scenario. The grid should solely be used to feed the load.
        """
        scenario = "AE_grid_battery"
        # Run the full MVS simulation on this scenario's csv inputs
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, scenario),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, scenario),
        )

        # Load the electricity bus flows and index them by timestep
        bus_flows = pd.read_excel(
            os.path.join(TEST_OUTPUT_PATH, scenario,
                         "timeseries_all_busses.xlsx"),
            sheet_name="Electricity",
        ).set_index("Unnamed: 0")

        # The battery must stay idle: neither charging nor discharging flows
        assert bus_flows[f"battery {INPUT_POWER}"].sum() == 0
        assert bus_flows[f"battery {OUTPUT_POWER}"].sum() == 0
    def test_benchmark_feedin_tariff_optimize_negative_value(self, margs):
        r"""
        Benchmark test for feed-in in a simple invest case with grid connected PV and negative feed-in tariff (pay for feed-in).
        """
        use_case = "Feedin_optimize"
        # Set feed-in tariff to a negative value in the provider input csv.
        filename = os.path.join(TEST_INPUT_PATH, use_case, CSV_ELEMENTS,
                                f"{ENERGY_PROVIDERS}.csv")
        df = pd.read_csv(filename).set_index("Unnamed: 0")
        # Use .loc instead of chained indexing (df["DSO"][...] = ...): chained
        # assignment may write to a temporary copy and leave the frame (and
        # hence the csv written below) unchanged.
        original_tariff = float(df.loc[FEEDIN_TARIFF, "DSO"])
        df.loc[FEEDIN_TARIFF, "DSO"] = -original_tariff
        df.to_csv(filename)

        try:
            main(
                overwrite=True,
                display_output="warning",
                path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
                input_type=CSV_EXT,
                path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case,
                                                "negative_value"),
            )
        finally:
            # Always restore the original feed-in tariff — even if main()
            # raises — so other tests using this use case see clean inputs.
            df.loc[FEEDIN_TARIFF, "DSO"] = original_tariff
            df.to_csv(filename)

        # get results
        df_busses_flow, cost_matrix, scalar_matrix, scalars = self.get_results(
            os.path.join(use_case, "negative_value"))

        # at a negative feed-in tariff and sufficiently installed PV capacity no
        # additional PV capacity should be installed, while no feed-in takes place.
        optimized_added_cap = scalar_matrix[OPTIMIZED_ADD_CAP]["pv_plant_01"]
        assert (
            optimized_added_cap == 0
        ), f"At a negative feed-in tariff and sufficiently installed PV capacity no additional PV capacity should be installed, while no feed-in takes place, however {optimized_added_cap} kWp is installed."
        feedin_sum = df_busses_flow[FEEDIN].sum()
        assert (
            feedin_sum == 0
        ), f"When the feed-in tariff is negative there should be no feed-in to the grid, however the sum of the feed-in time series is {feedin_sum}"
    def test_installedCap_zero_equal_installedCap_nan(self, margs):
        """
        This test checks if the invested storage capacity of an optimized GenericStorage
        where NaN is passed with installedCap is equal to the one of an optimized GenericStorage
        where zero is passed with installedCap.
        """
        use_cases = [
            "Thermal_storage_installedCap_nan",
            "Thermal_storage_installedCap_zero",
        ]

        # Point storage_01 at the storage file with fixed losses, keeping the
        # original csv content so it can be restored afterwards.
        storage_data_original = pd.read_csv(self.storage_csv,
                                            header=0,
                                            index_col=0)
        storage_data = storage_data_original.copy()
        # .loc instead of chained indexing (df[col][row] = ...), which may
        # silently assign to a temporary copy instead of the frame itself.
        storage_data.loc[
            "storage_filename",
            "storage_01"] = self.storage_opt_with_fixed_losses_float
        storage_data.to_csv(self.storage_csv)

        for use_case in use_cases:
            output_path = os.path.join(TEST_OUTPUT_PATH, use_case)
            if os.path.exists(output_path):
                shutil.rmtree(output_path, ignore_errors=True)
            # Recreate a clean output folder; exist_ok also covers the corner
            # case that rmtree left the folder behind (ignore_errors=True).
            os.makedirs(output_path, exist_ok=True)

            if use_case == "Thermal_storage_installedCap_nan":
                storage_xx_data_original = pd.read_csv(self.storage_xx,
                                                       header=0,
                                                       index_col=0)
                storage_xx_data = storage_xx_data_original.copy()
                storage_xx_data.loc[INSTALLED_CAP, "storage capacity"] = "NA"
                storage_xx_data.to_csv(self.storage_xx)
                try:
                    main(
                        display_output="warning",
                        path_input_folder=TEST_INPUT_PATH,
                        path_output_folder=os.path.join(
                            TEST_OUTPUT_PATH, use_case),
                        input_type="csv",
                        overwrite=True,
                        save_png=False,
                        lp_file_output=True,
                    )
                # Catch only real errors (never SystemExit/KeyboardInterrupt)
                # so the modified input files below are always restored.
                except Exception:
                    print("Please check the main input parameters for errors. "
                          "This exception prevents that energyStorage.py is "
                          "overwritten in case running the main errors out.")

                storage_xx_data_original.to_csv(self.storage_xx, na_rep="NA")
                storage_data_original.to_csv(self.storage_csv)
                results_thermal_storage_installedCap_nan = pd.read_excel(
                    os.path.join(TEST_OUTPUT_PATH, use_case,
                                 "timeseries_all_busses.xlsx"),
                    sheet_name="Heat",
                )

            elif use_case == "Thermal_storage_installedCap_zero":
                try:
                    main(
                        display_output="warning",
                        path_input_folder=TEST_INPUT_PATH,
                        path_output_folder=os.path.join(
                            TEST_OUTPUT_PATH, use_case),
                        input_type="csv",
                        overwrite=True,
                        save_png=False,
                        lp_file_output=True,
                    )
                except Exception:
                    print("Please check the main input parameters for errors. "
                          "This exception prevents that energyStorage.py is "
                          "overwritten in case running the main errors out.")
                results_thermal_storage_installedCap_zero = pd.read_excel(
                    os.path.join(TEST_OUTPUT_PATH, use_case,
                                 "timeseries_all_busses.xlsx"),
                    sheet_name="Heat",
                )

        # Compare the two flow series element-wise. The previous form
        # `a.values.all() == b.values.all()` compared two scalar booleans
        # ("are all entries truthy?") and would pass for entirely different
        # time series.
        assert (
            results_thermal_storage_installedCap_zero["TES input power"].values
            == results_thermal_storage_installedCap_nan["TES input power"].
            values
        ).all(), f"The invested storage capacity with {INSTALLED_CAP} that equals zero should be the same as with {INSTALLED_CAP} set to NaN"
    def test_fix_generic_storage_with_default_losses(self, margs):
        r"""
        This test checks if the fix GenericStorage matches the one with additional
        fixed thermal losses: fixed_thermal_losses_relative and
        fixed_thermal_losses_absolute.
        The simulation needs to run with a simple GenericStorage (Thermal storage without fixed
        thermal losses). To achieve this D1_model_components.py needs to be modified by
        commenting out the two parameters fixed_thermal_losses_relative and
        fixed_thermal_losses_absolute in storage_fix and storage_optimize functions.
        The assertion is a match of the timeseries_all_busses.xlsx files of the modified
        GenericStorage with the one of a simulation run with the implemented GenericStorage
        with fixed_thermal_losses_relative and fixed_thermal_losses_absolute.
        """

        # Redirect storage_01 to the storage file without fixed losses; the
        # original csv is kept so it can be restored at the end of the test.
        storage_data_original = pd.read_csv(self.storage_csv,
                                            header=0,
                                            index_col=0)
        storage_data = storage_data_original.copy()
        # .loc instead of chained indexing, which may assign to a copy
        storage_data.loc[
            "storage_filename",
            "storage_01"] = self.storage_fix_without_fixed_losses
        storage_data.to_csv(self.storage_csv)

        use_cases = ["Generic_storage_fix", "Stratified_thermal_storage_fix"]
        for use_case in use_cases:
            output_path = os.path.join(TEST_OUTPUT_PATH, use_case)
            if os.path.exists(output_path):
                shutil.rmtree(output_path, ignore_errors=True)
            # Recreate a clean output folder
            os.makedirs(output_path, exist_ok=True)

            if use_case == "Generic_storage_fix":
                # Read D1_model_components.py; the context manager closes the
                # handle even if reading fails.
                with open(self.D1) as d1_file:
                    mvs_D1 = d1_file.read()

                # Comment out the fixed thermal losses in storage_fix and
                # storage_optimize. Accumulate the replacements on a single
                # string: the previous version replaced on the pristine
                # `mvs_D1` in every loop pass, so only the LAST parameter was
                # actually commented out (and the result variable stayed
                # undefined when neither parameter occurred).
                fixed_losses_generic_storage = [
                    "fixed_losses_absolute",
                    "fixed_losses_relative",
                ]
                mvs_D1_modified_string = mvs_D1
                for fixed_losses in fixed_losses_generic_storage:
                    # str.replace is a no-op when the substring is absent, so
                    # no membership guard is needed
                    mvs_D1_modified_string = mvs_D1_modified_string.replace(
                        fixed_losses, "# " + fixed_losses)

                # Save the version with the fixed losses commented out
                with open(self.D1, "w") as d1_file:
                    d1_file.write(mvs_D1_modified_string)

                # Run the simulation with a fix Generic Storage
                try:
                    main(
                        display_output="warning",
                        path_input_folder=TEST_INPUT_PATH,
                        path_output_folder=os.path.join(
                            TEST_OUTPUT_PATH, use_case),
                        input_type="csv",
                        overwrite=True,
                        save_png=False,
                        lp_file_output=True,
                    )
                # Catch only real errors so the modified D1 is always restored
                except Exception:
                    print(
                        "Please check the main input parameters for errors. "
                        "This exception prevents that D1_model_components.py is "
                        "overwritten in case running the main errors out.")

                # Revert changes made in D1_model_components.py
                with open(self.D1, "w") as d1_file:
                    d1_file.write(mvs_D1)

                results_generic_storage = pd.read_excel(
                    os.path.join(TEST_OUTPUT_PATH, use_case,
                                 "timeseries_all_busses.xlsx"),
                    sheet_name="Heat",
                )

            elif use_case == "Stratified_thermal_storage_fix":
                main(
                    display_output="warning",
                    path_input_folder=TEST_INPUT_PATH,
                    path_output_folder=os.path.join(TEST_OUTPUT_PATH,
                                                    use_case),
                    input_type="csv",
                    overwrite=True,
                    save_png=False,
                    lp_file_output=True,
                )

                results_stratified_thermal_storage = pd.read_excel(
                    os.path.join(TEST_OUTPUT_PATH, use_case,
                                 "timeseries_all_busses.xlsx"),
                    sheet_name="Heat",
                )

        # Restore the storage csv BEFORE asserting, so a failing assertion
        # does not leave modified input files behind.
        storage_data_original.to_csv(self.storage_csv)

        # Element-wise comparison; `a.values.all() == b.values.all()` only
        # compared two scalar booleans and asserted nothing meaningful.
        assert (
            results_generic_storage["TES input power"].values ==
            results_stratified_thermal_storage["TES input power"].values
        ).all(), f"When the parameters {THERM_LOSSES_REL} and {THERM_LOSSES_ABS} are commented out in {self.D1} the results of the simulation should be the same as if {THERM_LOSSES_REL} and {THERM_LOSSES_ABS} are not used in the simulation"
# Ejemplo n.º 20 (snippet-aggregator separator, kept as a comment)
    def test_benchmark_AE_grid_battery(self, margs):
        r"""
        Benchmark test for a grid plus battery scenario with non-optimized
        storage assets.

        Validates, for each storage asset in the results json: that the input
        files were not changed (only the expected assets are present, with
        ``OPTIMIZE_CAP`` as configured), that installed capacities and total
        flows match the asset's configuration, and that an SOC timeseries was
        calculated.

        NOTE(review): a method with this exact name appears earlier in the
        file; if both end up in the same class the earlier definition is
        shadowed and never collected by pytest — consider renaming one.
        TODO confirm.
        """
        use_case = "AE_grid_battery"
        # Run the full MVS simulation on this use case's csv inputs.
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )

        # Load the results json produced by the simulation run above.
        data = load_json(
            os.path.join(TEST_OUTPUT_PATH, use_case,
                         JSON_WITH_RESULTS + JSON_FILE_EXTENSION))

        # Names of the two storage assets expected in this scenario's inputs.
        No_optimize_no_cap_in_out = "No_optimize_no_cap_in_out"
        No_optimize_with_cap_in_out = "No_optimize_with_cap_in_out"

        description = "description"
        # Expected configuration of each storage asset; used both to validate
        # the inputs and to build readable assertion messages.
        implemented_storage_assets = {
            No_optimize_no_cap_in_out: {
                description:
                "Storage asset with a set storage capacity but no input or output power capacity, not to be optimized.",
                OPTIMIZE_CAP: False,
            },
            No_optimize_with_cap_in_out: {
                description:
                "Storage asset with a set storage capacity as well as set input or output power capacity, not to be optimized.",
                OPTIMIZE_CAP: False,
            },
        }

        for storage_asset in data[ENERGY_STORAGE].keys():
            # Assertions that validate that the input files have not been changed.
            assert (
                storage_asset in implemented_storage_assets
            ), f"The defined storage asset {storage_asset} is not expected. It should be one of the assets {implemented_storage_assets.keys()}. It should be {implemented_storage_assets[storage_asset][description]}"
            exp_optimize = implemented_storage_assets[storage_asset][
                OPTIMIZE_CAP]
            res_optimize = data[ENERGY_STORAGE][storage_asset][OPTIMIZE_CAP][
                VALUE]
            assert (
                res_optimize == exp_optimize
            ), f"The {OPTIMIZE_CAP} of storage asset {storage_asset} should be {exp_optimize}, but is {res_optimize}. "

            # Asset without input/output power: capacity > 0, all flows 0/None.
            if storage_asset == No_optimize_no_cap_in_out:
                for sub_item in [INPUT_POWER, OUTPUT_POWER, STORAGE_CAPACITY]:
                    # Assertions that validate that the input files are correct
                    if sub_item == STORAGE_CAPACITY:
                        assert (
                            data[ENERGY_STORAGE][storage_asset][sub_item]
                            [INSTALLED_CAP][VALUE] > 0
                        ), f"For this storage asset {storage_asset} the {INSTALLED_CAP} or {sub_item} should be > 0, as {implemented_storage_assets[storage_asset][description]}"
                        # Assertion that checks if flows are as expected
                        res = data[ENERGY_STORAGE][storage_asset][sub_item][
                            TOTAL_FLOW][VALUE]
                        assert (
                            res is None
                        ), f"With no input/output power capacities, storage asset {storage_asset} should have no flow though the {sub_item}."

                    else:
                        assert (
                            data[ENERGY_STORAGE][storage_asset][sub_item]
                            [INSTALLED_CAP][VALUE] == 0
                        ), f"For this storage asset {storage_asset} the {INSTALLED_CAP} or {sub_item} should be == 0, as {implemented_storage_assets[storage_asset][description]}."
                        # Assertion that checks if flows are as expected
                        res = data[ENERGY_STORAGE][storage_asset][sub_item][
                            TOTAL_FLOW][VALUE]
                        assert (
                            res == 0
                        ), f"With no input/output power capacities, storage asset {storage_asset} should have 0 flow though the {sub_item}."

            # Asset with input/output power: all capacities > 0; the storage
            # capacity itself has no TOTAL_FLOW (stored energy is not a flow).
            if storage_asset == No_optimize_with_cap_in_out:
                for sub_item in [INPUT_POWER, OUTPUT_POWER, STORAGE_CAPACITY]:
                    # Assertions that validate that the input files are correct
                    assert (
                        data[ENERGY_STORAGE][storage_asset][sub_item]
                        [INSTALLED_CAP][VALUE] > 0
                    ), f"For this storage asset {storage_asset} the {INSTALLED_CAP} or {sub_item} should be > 0, as {implemented_storage_assets[storage_asset][description]}."

                    # Assertion that checks if flows are as expected
                    res = data[ENERGY_STORAGE][storage_asset][sub_item][
                        TOTAL_FLOW][VALUE]
                    if sub_item == STORAGE_CAPACITY:
                        assert (
                            res is None
                        ), f"With input/output power capacities, storage asset {storage_asset} does have a timeseries, but as the stored energy in a timestep is not a flow, it does not have a {TOTAL_FLOW}."
                    else:
                        assert (
                            res >= 0
                        ), f"With input/output power capacities, storage asset {storage_asset} can have an flow though the {sub_item}, ie. {TOTAL_FLOW} can be >=0. Its value, though, is {res}."

            # Every storage asset must have an SOC timeseries in the results.
            assert (
                TIMESERIES_SOC in data[ENERGY_STORAGE][storage_asset]
            ), f"The {TIMESERIES_SOC} of {storage_asset} was not calculated."
    def test_default_losses_and_zero_losses_equal_storage_capacity_series(
            self, margs):
        """
        This test checks if the invested storage capacity of an optimized GenericStorage
        without fixed thermal losses is equal to the one of an optimized GenericStorage
        with fixed_thermal_losses_relative and fixed_thermal_losses_absolute, which are
        zero and passed as time series.
        """
        use_cases = [
            "Thermal_storage_losses_default", "Thermal_storage_losses_zero"
        ]

        for use_case in use_cases:
            output_path = os.path.join(TEST_OUTPUT_PATH, use_case)
            if os.path.exists(output_path):
                shutil.rmtree(output_path, ignore_errors=True)
            # Recreate a clean output folder
            os.makedirs(output_path, exist_ok=True)

            if use_case == "Thermal_storage_losses_default":

                # Point storage_01 at the storage file without fixed losses;
                # keep the original csv so it can be restored afterwards.
                storage_data_original = pd.read_csv(self.storage_csv,
                                                    header=0,
                                                    index_col=0)
                storage_data = storage_data_original.copy()
                # .loc instead of chained indexing, which may write to a copy
                storage_data.loc[
                    "storage_filename",
                    "storage_01"] = self.storage_opt_without_fixed_losses
                storage_data.to_csv(self.storage_csv)

                try:
                    main(
                        display_output="warning",
                        path_input_folder=TEST_INPUT_PATH,
                        path_output_folder=os.path.join(
                            TEST_OUTPUT_PATH, use_case),
                        input_type="csv",
                        overwrite=True,
                        save_png=False,
                        lp_file_output=True,
                    )
                # Catch only real errors so the modified csv is restored below
                except Exception:
                    print("Please check the main input parameters for errors. "
                          "This exception prevents that energyStorage.py is "
                          "overwritten in case running the main errors out.")

                storage_data_original.to_csv(self.storage_csv)
                results_thermal_storage_losses_default = pd.read_excel(
                    os.path.join(TEST_OUTPUT_PATH, use_case,
                                 "timeseries_all_busses.xlsx"),
                    sheet_name="Heat",
                )

            elif use_case == "Thermal_storage_losses_zero":

                # Point storage_01 at the storage file with zero fixed losses
                # passed as time series.
                storage_data_original = pd.read_csv(self.storage_csv,
                                                    header=0,
                                                    index_col=0)
                storage_data = storage_data_original.copy()
                storage_data.loc[
                    "storage_filename",
                    "storage_01"] = self.storage_opt_with_zero_fixed_losses_series
                storage_data.to_csv(self.storage_csv)

                try:
                    main(
                        display_output="warning",
                        path_input_folder=TEST_INPUT_PATH,
                        path_output_folder=os.path.join(
                            TEST_OUTPUT_PATH, use_case),
                        input_type="csv",
                        overwrite=True,
                        save_png=False,
                        lp_file_output=True,
                    )
                except Exception:
                    print("Please check the main input parameters for errors. "
                          "This exception prevents that energyStorage.py is "
                          "overwritten in case running the main errors out.")

                storage_data_original.to_csv(self.storage_csv)
                results_thermal_storage_losses_zero = pd.read_excel(
                    os.path.join(TEST_OUTPUT_PATH, use_case,
                                 "timeseries_all_busses.xlsx"),
                    sheet_name="Heat",
                )

        # Element-wise comparison of the two time series. The previous
        # `a.values.all() == b.values.all()` only compared two scalar booleans
        # and would pass for entirely different series.
        assert (
            results_thermal_storage_losses_default["TES input power"].values
            == results_thermal_storage_losses_zero["TES input power"].values
        ).all(), f"The invested storage capacity with passed losses {THERM_LOSSES_REL} and {THERM_LOSSES_ABS} that equal zero should be the same as without {THERM_LOSSES_REL} and {THERM_LOSSES_ABS}"
# Ejemplo n.º 22 (snippet-aggregator separator, kept as a comment)
    def test_benchmark_AE_grid_battery_peak_pricing(self, margs):
        r"""
        Benchmark test for electricity grid peak demand pricing. To evaluate this, a battery is used. The battery should be charged at instances before the grid supplies peak demand. The battery is discharged when demand is higher than peak demand and charged when demand is smaller than peak demand.
        """
        use_case = "AE_grid_battery_peak_pricing"
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )
        # read json with results file
        data = load_json(
            os.path.join(TEST_OUTPUT_PATH, use_case,
                         JSON_WITH_RESULTS + JSON_FILE_EXTENSION),
            flag_missing_values=False,
        )
        # Optimized added capacity of each consumption-period transformer is
        # the peak demand of that pricing period.
        # Fix: the third entry previously read period_2 twice, so period 3
        # was validated against the wrong peak demand value.
        peak_demand = [
            data[ENERGY_CONVERSION]
            ["Electricity grid DSO_consumption_period_1"][OPTIMIZED_ADD_CAP]
            [VALUE],
            data[ENERGY_CONVERSION]
            ["Electricity grid DSO_consumption_period_2"][OPTIMIZED_ADD_CAP]
            [VALUE],
            data[ENERGY_CONVERSION]
            ["Electricity grid DSO_consumption_period_3"][OPTIMIZED_ADD_CAP]
            [VALUE],
        ]
        # read timeseries_all_busses excel file
        busses_flow = pd.read_excel(
            os.path.join(TEST_OUTPUT_PATH, use_case,
                         "timeseries_all_busses.xlsx"),
            sheet_name="Electricity",
        )
        # make the time the index
        busses_flow = busses_flow.set_index("Unnamed: 0")
        # read the columns with the values to be used
        DSO_periods = [
            busses_flow["Electricity grid DSO_consumption_period_1"],
            busses_flow["Electricity grid DSO_consumption_period_2"],
            busses_flow["Electricity grid DSO_consumption_period_3"],
        ]
        demand = busses_flow["demand_01"]
        battery_charge = busses_flow[f"battery {INPUT_POWER}"]
        battery_discharge = busses_flow[f"battery {OUTPUT_POWER}"]

        # look for peak demand in each pricing period
        for j in range(0, 3):
            # iterate over the timesteps of the period currently checked
            # (previously hard-coded DSO_periods[1]; all columns of one sheet
            # have the same length, but indexing by j states the intent)
            for i in range(0, len(DSO_periods[j])):
                # When the DSO is supplying peak demand while demand is smaller than supplied electricity.
                # Then, the battery is charged.
                if (DSO_periods[j][i] == peak_demand[j]
                        and abs(demand[i]) < DSO_periods[j][i]):
                    assert abs(battery_charge[i]) > 0
                # When DSO supplies peak demand and demand is larger then the peak demand,
                # Then, the battery has to be discharged
                if (DSO_periods[j][i] == peak_demand[j]
                        and abs(demand[i]) > DSO_periods[j][i]):
                    assert abs(battery_discharge[i]) > 0
                # If DSO supplies peak demand and the demand is larger then the supply,
                # then, in the previous timestep the battery must be charged,
                # as long as in the previous timestep the demand was smaller then the supply.
                if (DSO_periods[j][i] == peak_demand[j]
                        and abs(demand[i]) > DSO_periods[j][i]
                        and DSO_periods[j][i - 1] > abs(demand[i - 1])):
                    assert abs(battery_charge[i - 1]) > 0
    def test_benchmark_feature_parameters_as_timeseries(self, margs):
        r"""
        Notes
        -----
        This benchmark test checks if a scalar value can be provided as a timeseries within a csv file.
        It also checks whether these timeseries can be provided within a single csv file.

        NOTE(review): an older method with the same name appears earlier in
        the file; if both live in the same class only one of them is collected
        by pytest — consider removing or renaming the duplicate. TODO confirm.
        """
        use_case = "Feature_parameters_as_timeseries"

        # Execute the script
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )

        # read json with results file
        data = load_json(
            os.path.join(TEST_OUTPUT_PATH, use_case,
                         JSON_WITH_RESULTS + JSON_FILE_EXTENSION),
            flag_missing_values=False,
        )

        # read csv with expected values of the timeseries
        csv_file = "parameter_timeseries.csv"
        csv_data = pd.read_csv(
            os.path.join(TEST_INPUT_PATH, use_case, TIME_SERIES, csv_file))

        # constant variables: asset names and csv column headers
        diesel = "diesel_generator"
        dso = "DSO"
        diesel_efficiency = "diesel_efficiency"
        electricity_price = "electricity_price"
        soc_min = "soc_min"

        # A NaN entry in the demand profile must be parsed as 0.
        assert (
            data[ENERGY_CONSUMPTION]["demand_01"][TIMESERIES][3] == 0
        ), f"The NaN value of the demand profile is not replaced by a 0 value as it should."
        # Compare every timestep of each parameter against the csv values.
        for k in range(0, len(csv_data[diesel_efficiency])):
            assert data[ENERGY_CONVERSION][diesel][EFFICIENCY][VALUE][
                k] == pytest.approx(
                    csv_data[diesel_efficiency][k], rel=1e-6
                ), f"The diesel efficiency has different values then it was defined as with the csv file {csv_file}."
            assert data[ENERGY_PROVIDERS][dso][ENERGY_PRICE][VALUE][
                k] == pytest.approx(
                    csv_data[electricity_price][k], rel=1e-6
                ), f"The energy price has different values then it was defined as with the csv file {csv_file}."
            assert data[ENERGY_PRODUCTION][
                dso +
                DSO_CONSUMPTION][DISPATCH_PRICE][VALUE][k] == pytest.approx(
                    csv_data[electricity_price][k], rel=1e-6
                ), f"The feedin tariff has different values then it was defined as with the csv file {csv_file}."
            # The first two soc_min entries are expected to parse to 0 (NaN in
            # the input csv, per the assertion message) — TODO confirm inputs.
            if k == 0 or k == 1:
                assert (
                    data[ENERGY_STORAGE]["storage_01"][STORAGE_CAPACITY]
                    [SOC_MIN][VALUE][k] == 0
                ), f"The NaN value of the soc min timeseries is not parsed as 0 as it should."
            else:
                assert data[ENERGY_STORAGE]["storage_01"][STORAGE_CAPACITY][
                    SOC_MIN][VALUE][k] == pytest.approx(
                        csv_data[soc_min][k], rel=1e-6
                    ), f"The soc min has different values then it was defined as with the csv file {csv_file}."
# Ejemplo n.º 24 (snippet-aggregator separator, kept as a comment)
    def test_benchmark_Economic_KPI_C2_E2(self, margs):
        r"""
        Notes
        -----
        With this benchmark test, we evaluate the performance of the economic pre- and post-processing in C2 and E2.
        Values that have to be compared for each asset
        - LIFETIME_SPECIFIC_COST_OM
        - LIFETIME_PRICE_DISPATCH
        - LIFETIME_SPECIFIC_COST
        - ANNUITY_SPECIFIC_INVESTMENT_AND_OM
        - SIMULATION_ANNUITY
        - SPECIFIC_REPLACEMENT_COSTS_INSTALLED
        - SPECIFIC_REPLACEMENT_COSTS_OPTIMIZED
        - OPTIMIZED_ADD_CAP != 0, as we are not optimizing any asset
        - ANNUITY_OM
        - ANNUITY_TOTAL
        - COST_TOTAL
        - COST_OPERATIONAL_TOTAL
        - COST_OM
        - COST_DISPATCH
        - COST_INVESTMENT
        - COST_UPFRONT
        - COST_REPLACEMENT
        - LCOE_ASSET

        Overall economic values of the project:
        - NPV
        - Annuity

        """

        # Execute the script
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, USE_CASE),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, USE_CASE),
        )

        # read json with results file
        data = load_json(
            os.path.join(
                TEST_OUTPUT_PATH, USE_CASE, JSON_WITH_RESULTS + JSON_FILE_EXTENSION
            ),
            flag_missing_values=False,
        )

        # Read expected values from file.
        expected_value_file = "test_data_economic_expected_values.csv"
        expected_values = pd.read_csv(
            os.path.join(TEST_INPUT_PATH, USE_CASE, expected_value_file),
            sep=",",
            index_col=0,
        )

        KEYS_TO_BE_EVALUATED_PER_ASSET = [
            LIFETIME_SPECIFIC_COST_OM,
            LIFETIME_PRICE_DISPATCH,
            LIFETIME_SPECIFIC_COST,
            ANNUITY_SPECIFIC_INVESTMENT_AND_OM,
            SIMULATION_ANNUITY,
            SPECIFIC_REPLACEMENT_COSTS_INSTALLED,
            SPECIFIC_REPLACEMENT_COSTS_OPTIMIZED,
            OPTIMIZED_ADD_CAP,
            COST_INVESTMENT,
            COST_UPFRONT,
            COST_REPLACEMENT,
            COST_OM,
            COST_DISPATCH,
            COST_OPERATIONAL_TOTAL,
            COST_TOTAL,
            ANNUITY_OM,
            ANNUITY_TOTAL,
            LCOE_ASSET,
        ]

        # Compare asset costs calculated in C2 and E2 with benchmark data from csv file
        for asset in expected_values.index:

            asset_group = expected_values.loc[asset, "group"]

            # determine asset dictionary (special for storages)
            if asset in [INPUT_POWER, OUTPUT_POWER, STORAGE_CAPACITY]:
                asset_data = data[asset_group]["storage_01"][asset]
            else:
                asset_data = data[asset_group][asset]
            # assertion
            for key in KEYS_TO_BE_EVALUATED_PER_ASSET:
                assert (
                    key in asset_data
                ), f"{key} is not in the asset data of {asset_group}, {asset}. It includes: {asset_data.keys()}."
                assert expected_values.loc[asset, key] == pytest.approx(
                    asset_data[key][VALUE], rel=1e-3
                ), f"Parameter {key} of asset {asset} is not of expected value, expected {expected_values.loc[asset, key]}, got {asset_data[key][VALUE]}."

        # Now we established that the externally calculated values are equal to the internally calculated values.
        # Therefore, we can now use the cost data from the assets to validate the cost data for the whole energy system.

        demand = pd.read_csv(
            os.path.join(TEST_INPUT_PATH, USE_CASE, TIME_SERIES, "demand.csv"), sep=",",
        )
        aggregated_demand = demand.sum()[0]

        KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM = {
            COST_INVESTMENT: 0,
            COST_UPFRONT: 0,
            COST_REPLACEMENT: 0,
            COST_OM: 0,
            COST_DISPATCH: 0,
            COST_OPERATIONAL_TOTAL: 0,
            COST_TOTAL: 0,
            ANNUITY_OM: 0,
            ANNUITY_TOTAL: 0,
        }

        def add_to_key(KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM, asset_data):
            """
            Add individual cost to each of the separate costs.

            Parameters
            ----------
            KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM: dict
                dict of keys to be evaluated for system costs, to be updated
            asset_data: dict
                Asset data with economic parameters

            Returns
            -------
            Updated KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM
            """
            for key in KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM:
                KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM.update(
                    {
                        key: KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM[key]
                        + asset_data[key][VALUE]
                    }
                )

        for asset_group in (
            ENERGY_CONSUMPTION,
            ENERGY_CONVERSION,
            ENERGY_PRODUCTION,
            ENERGY_STORAGE,
        ):
            for asset in data[asset_group]:
                # for storage we look at the annuity of the in and out flows and storage capacity
                if asset_group == ENERGY_STORAGE:
                    for storage_type in [INPUT_POWER, OUTPUT_POWER, STORAGE_CAPACITY]:
                        asset_data = data[asset_group][asset][storage_type]
                        add_to_key(KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM, asset_data)
                else:
                    asset_data = data[asset_group][asset]
                    add_to_key(KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM, asset_data)

        for key in KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM:
            assert KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM[key] == pytest.approx(
                data[KPI][KPI_SCALARS_DICT][key], rel=1e-3
            ), f"The key {key} is not of expected value {KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM[key]} but {data[KPI][KPI_SCALARS_DICT][key]}. This is based on the before established assertion, that the expected values of asset costs are equal to the ones in the json results file."

        # Compute the lcoe for this simple case from the data (single demand)
        lcoe = KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM[ANNUITY_TOTAL] / aggregated_demand
        mvs_lcoe = data[KPI][KPI_SCALARS_DICT][LCOeleq]
        assert lcoe == pytest.approx(
            mvs_lcoe, rel=1e-3
        ), f"Parameter {LCOE_ASSET} of system is not of expected value (benchmark of {lcoe} versus computed value of {mvs_lcoe}."

        attributed_costs = 0
        for key in data[KPI][KPI_SCALARS_DICT]:
            if ATTRIBUTED_COSTS in key:

                attributed_costs += data[KPI][KPI_SCALARS_DICT][key]
        assert (
            attributed_costs == data[KPI][KPI_SCALARS_DICT][COST_TOTAL]
        ), f"The total attributed costs are not the costs of the total system."
# Ejemplo n.º 25 — 0
    def test_renewable_factor_and_renewable_share_of_local_generation(self, margs):
        r"""
        Benchmark test that checks the calculation of
        * TOTAL_NON_RENEWABLE_GENERATION_IN_LES
        * TOTAL_RENEWABLE_GENERATION_IN_LES
        * TOTAL_NON_RENEWABLE_ENERGY_USE
        * TOTAL_RENEWABLE_ENERGY_USE
        * RENEWABLE_FACTOR
        * RENEWABLE_SHARE_OF_LOCAL_GENERATION

        Runs the "AB_grid_PV" scenario (one sector, only grid and PV present)
        and compares the KPIs in the result json against values derived
        directly from the asset flows.
        """
        use_case = "AB_grid_PV"
        # Run the full simulation for the benchmark scenario
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )
        # Load the simulation results for the RENEWABLE_FACTOR and
        # RENEWABLE_SHARE_OF_LOCAL_GENERATION checks
        results_file = os.path.join(
            TEST_OUTPUT_PATH, use_case, JSON_WITH_RESULTS + JSON_FILE_EXTENSION
        )
        data = load_json(results_file, flag_missing_values=False)

        # Renewable supply is the aggregated PV generation; non-renewable
        # supply is the aggregated consumption from the DSO.
        total_res_local = data[ENERGY_PRODUCTION]["pv_plant_01"][TOTAL_FLOW][VALUE]
        dso_consumption_source = (
            data[ENERGY_PROVIDERS]["Electricity_grid_DSO"][LABEL] + DSO_CONSUMPTION
        )
        total_supply_dso = data[ENERGY_PRODUCTION][dso_consumption_source][
            TOTAL_FLOW
        ][VALUE]

        kpi_scalars = data[KPI][KPI_SCALARS_DICT]
        kpi_uncoupled = data[KPI][KPI_UNCOUPLED_DICT]
        expected_renewable_factor = total_res_local / (
            total_res_local + total_supply_dso
        )

        assert (
            kpi_scalars[TOTAL_RENEWABLE_GENERATION_IN_LES] == total_res_local
        ), f"The total renewable generation is not equal to the generation of the PV system."
        assert (
            kpi_scalars[TOTAL_NON_RENEWABLE_GENERATION_IN_LES] == 0
        ), f"There is no local non-renewable generation asset, but there seems to be a non-renewable production."
        assert (
            kpi_scalars[TOTAL_RENEWABLE_ENERGY_USE] == total_res_local
        ), f"There is another renewable energy source apart from PV."
        assert (
            kpi_scalars[TOTAL_NON_RENEWABLE_ENERGY_USE] == total_supply_dso
        ), "The non-renewable energy use was expected to be all grid supply, but this does not hold true."
        assert (
            kpi_scalars[RENEWABLE_FACTOR] == expected_renewable_factor
        ), f"The {RENEWABLE_FACTOR} is not as expected."
        assert kpi_uncoupled.loc[RENEWABLE_FACTOR, "Electricity"] == pytest.approx(
            expected_renewable_factor
        ), f"The {RENEWABLE_FACTOR} is not as expected."
        # All local generation is PV, hence the local renewable share is 1.
        assert (
            kpi_scalars[RENEWABLE_SHARE_OF_LOCAL_GENERATION] == 1
        ), f"The {RENEWABLE_SHARE_OF_LOCAL_GENERATION} is not as expected."
        assert (
            kpi_uncoupled.loc[RENEWABLE_SHARE_OF_LOCAL_GENERATION, "Electricity"] == 1
        ), f"The {RENEWABLE_SHARE_OF_LOCAL_GENERATION} is not as expected."
# Ejemplo n.º 26 — 0
# Entry point: run the MVS command line interface when this file is
# executed as a script.
from multi_vector_simulator.cli import main

if __name__ == "__main__":
    main()
# Ejemplo n.º 27 — 0
 def test_if_energy_system_network_graph_is_stored_if_no_pdf_nor_png_option(
         self, m_args):
     """Check that no energy system graph file is written to the output
     folder when the simulation is run with default output options."""
     main(overwrite=True, display_output="warning")
     graph_file = os.path.join(TEST_OUTPUT_PATH, ES_GRAPH)
     assert os.path.exists(graph_file) is False
# Ejemplo n.º 28 — 0
    def test_benchmark_AB_grid_pv(self, margs):
        r"""
        Benchmark test for simple case grid connected PV, in which a fix capacity of PV is installed (installedCap, no optimization).

        Assertions performed:
        - The sum of energy consumption from the grid and PV generation is equal to the load (and flow to excess sink) at all times (ie. energy balance)
        - The sum of the flow to the excess sink is zero for time steps where demand equals or is greater than generation. This ensures that the total PV generation is used to cover the demand.
        - The PV generation time series in the results equals the input time series of specific PV generation multiplied by installed capacity. This ensures that `installedCap` is processed correctly within the model when an asset is not optimized.

        """
        use_case = "AB_grid_PV"
        # Run the simulation for the benchmark scenario
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )

        # Load the electricity bus flows from the result workbook
        df_busses_flow = pd.read_excel(
            os.path.join(TEST_OUTPUT_PATH, use_case,
                         "timeseries_all_busses.xlsx"),
            sheet_name="Electricity",
        )
        # make the time the index
        df_busses_flow = df_busses_flow.set_index("Unnamed: 0")
        # compute the sum of the in and out of the electricity bus
        df_busses_flow["net_sum"] = df_busses_flow.sum(axis=1)

        # Energy balance: the net sum of all bus flows must be zero at every
        # time step, up to numerical rounding errors. The deviation is checked
        # in absolute value so that both positive and negative imbalances are
        # detected (a plain `x < 1e-4` check would silently accept arbitrarily
        # large negative imbalances).
        max_deviation = df_busses_flow["net_sum"].abs().max()
        assert (
            max_deviation < 1e-4
        ), f"The electricity bus is not balanced at all time steps; the largest absolute net flow deviation is {max_deviation}."

        # make sure that electricity excess is zero whenever demand >= generation (this means that total pv generation
        # is used to cover the demand)
        selected_time_steps = df_busses_flow.loc[
            df_busses_flow["demand_01"].abs() >= df_busses_flow["pv_plant_01"]]
        excess = selected_time_steps[f"Electricity{EXCESS_SINK}"].sum()
        assert (
            excess == 0
        ), f"Total PV generation should be used to cover demand, i.e. electricity excess should be zero whenever demand >= generation, but excess is {excess}."

        # make sure that installedCap is processed correctly - pv time series of results
        # equal input pv time series times installedCap
        # get pv input time series and evaluated period (to shorten time series)
        input_time_series_pv = pd.read_csv(
            os.path.join(TEST_INPUT_PATH, use_case, TIME_SERIES,
                         "pv_solar_input.csv"))
        simulation_settings = pd.read_csv(
            os.path.join(TEST_INPUT_PATH, use_case, CSV_ELEMENTS,
                         f"{SIMULATION_SETTINGS}.csv")).set_index("Unnamed: 0")
        evaluated_period = float(
            simulation_settings[SIMULATION_SETTINGS][EVALUATED_PERIOD])
        # shorten input pv time series according to `evaluated_period` (hourly steps)
        input_time_series_pv_shortened = input_time_series_pv[:int(
            evaluated_period * 24)]["kW"]
        # get result time series and installed pv capacity
        result_time_series_pv = df_busses_flow["pv_plant_01"]
        energy_production_data = pd.read_csv(
            os.path.join(TEST_INPUT_PATH, use_case, CSV_ELEMENTS,
                         f"{ENERGY_PRODUCTION}.csv")).set_index("Unnamed: 0")
        installed_capacity = float(
            energy_production_data["pv_plant_01"][INSTALLED_CAP])
        # align indices so the element-wise comparison is positional
        result_time_series_pv.index = input_time_series_pv_shortened.index

        # result series must equal specific generation * installedCap
        assert_series_equal(
            result_time_series_pv.astype(np.float64),
            input_time_series_pv_shortened * installed_capacity,
            check_names=False,
        )
# Ejemplo n.º 29 — 0
    def test_maximum_cap_constraint(self, margs):
        r"""
        Notes
        -----
        With this benchmark test, the maximum capacity constraint is validated.
        The benchmark test passes if the optimized added capacity is less than or
        equal to the defined maximum capacity.

        Three cases are checked for both energy conversion and energy production
        assets (each case is tied to one named asset of the scenario):

        a) installedCap > 0 and maximumCap is None, i.e. the optimizable
           capacity is unbounded.
        b) installedCap > 0 and maximumCap > 0 with installedCap <= maximumCap.
        c) installedCap == 0 and maximumCap > 0.

        For the PV production assets it is additionally asserted that the
        specific generation time series scaled by the total capacity matches
        the calculated total flow of the asset.
        """
        use_case = [
            "Constraint_maximum_capacity",
        ]
        # run the simulation for each use case and validate the constraint
        for case in use_case:
            main(
                overwrite=True,
                display_output="warning",
                path_input_folder=os.path.join(TEST_INPUT_PATH, case),
                input_type=CSV_EXT,
                path_output_folder=os.path.join(TEST_OUTPUT_PATH, case),
            )
            # read back the simulation results from the output json
            data = load_json(
                os.path.join(TEST_OUTPUT_PATH, case,
                             JSON_WITH_RESULTS + JSON_FILE_EXTENSION))

            # Energy conversion assets
            for conv_asset in data[ENERGY_CONVERSION]:
                # ToDo: another test asserting installedCap * time_series == time series in output
                # using the coupled definition for MaximumCap (includes InstalledCap + additional maximum optimizable capacity)
                max_tot_cap = data[ENERGY_CONVERSION][conv_asset][MAXIMUM_CAP][
                    VALUE]
                max_add_cap = data[ENERGY_CONVERSION][conv_asset][
                    MAXIMUM_ADD_CAP][VALUE]
                opt_add_cap = data[ENERGY_CONVERSION][conv_asset][
                    OPTIMIZED_ADD_CAP][VALUE]
                inst_cap = data[ENERGY_CONVERSION][conv_asset][INSTALLED_CAP][
                    VALUE]
                # case a) inst_cap > 0, max_tot_cap is None (optimizable capacity is unbounded)
                if conv_asset == "transformer_station_in":
                    assert (
                        inst_cap <= inst_cap + opt_add_cap
                    ), f"The installed capacity of {conv_asset} prior to optimization should be less than or equal to the total installed capacity after optimization, but here {inst_cap} > {inst_cap} + {opt_add_cap}."
                    assert (
                        max_tot_cap is None
                    ), f"The total maximum capacity of {conv_asset} should be None (as the user has defined it this way), but instead it is {max_tot_cap}."
                    assert (
                        max_add_cap is None
                    ), f"The total maximum capacity of {conv_asset} is set to None which means that the maximum additional capacity should also be None, but here it is {max_add_cap}."
                # case b) inst_cap > 0, max_tot_cap > 0, inst_cap <= max_tot_cap
                if conv_asset == "diesel_generator_1":
                    assert (
                        opt_add_cap <= max_add_cap
                    ), f"The optimized additional capacity of {conv_asset} should be less than or equal to the total maximum capacity - the installed capacity, but {opt_add_cap} > {max_add_cap}."
                    assert (
                        max_tot_cap == inst_cap + max_add_cap
                    ), f"The maximum total capacity of {conv_asset} should be equal to the already installed capacity + the maximum additional optimizable capacity, but {max_tot_cap} is not equal to {inst_cap} + {max_add_cap}."
                # case c) inst_cap = 0, max_tot_cap > 0
                if conv_asset == "solar_inverter_(mono)":
                    assert (
                        max_add_cap == max_tot_cap
                    ), f"Because the installed capacity of {conv_asset} is zero, the maximum additional capacity should be equal to the total maximum capacity, but {max_add_cap} is not equal to {max_tot_cap}."
                    assert (
                        opt_add_cap <= max_add_cap
                    ), f"The optimized additional capacity of {conv_asset} should be less than the maximum possible additional capacity, but {opt_add_cap} > {max_add_cap}."

            # Energy production assets (same three cases as above)
            for prod_asset in data[ENERGY_PRODUCTION]:
                # using the coupled definition for MaximumCap (includes InstalledCap +  additional maximum optimizable capacity)
                max_tot_cap = data[ENERGY_PRODUCTION][prod_asset][MAXIMUM_CAP][
                    VALUE]
                max_add_cap = data[ENERGY_PRODUCTION][prod_asset][
                    MAXIMUM_ADD_CAP][VALUE]
                opt_add_cap = data[ENERGY_PRODUCTION][prod_asset][
                    OPTIMIZED_ADD_CAP][VALUE]
                inst_cap = data[ENERGY_PRODUCTION][prod_asset][INSTALLED_CAP][
                    VALUE]
                # case a) inst_cap > 0, max_tot_cap is None (optimizable capacity is unbounded)
                if prod_asset == "pv_plant_01":
                    assert (
                        inst_cap <= inst_cap + opt_add_cap
                    ), f"The installed capacity of {prod_asset} prior to optimization should be less than or equal to the total installed capacity after optimization, but here {inst_cap} > {inst_cap} + {opt_add_cap}."
                    assert (
                        max_tot_cap is None
                    ), f"The total maximum capacity of {prod_asset} should be None (as the user has defined it this way), but instead it is {max_tot_cap}."
                    assert (
                        max_add_cap is None
                    ), f"The total maximum capacity of {prod_asset} is set to None which means that the maximum additional capacity should also be None, but here it is {max_add_cap}."
                # case b) inst_cap > 0, max_tot_cap > 0, inst_cap <= max_tot_cap
                if prod_asset == "pv_plant_02":
                    assert (
                        opt_add_cap <= max_add_cap
                    ), f"The optimized additional capacity of the asset should be less than or equal to the total maximum capacity - the installed capacity, but {opt_add_cap} > {max_add_cap}."
                # case c) inst_cap = 0, max_tot_cap > 0
                if prod_asset == "pv_plant_03":
                    assert (
                        max_add_cap == max_tot_cap
                    ), f"Because the installed capacity of {prod_asset} is zero, the maximum additional capacity should be equal to the total maximum capacity, but {max_add_cap} is not equal to {max_tot_cap}."
                    assert (
                        opt_add_cap <= max_add_cap
                    ), f"The optimized additional capacity of {prod_asset} should be less than the maximum possible additional capacity, but {opt_add_cap} > {max_add_cap}."

                if prod_asset in ["pv_plant_01", "pv_plant_02", "pv_plant_03"]:
                    # check that the power output timeseries * total capacity of each production asset is equal to
                    # the calculated total flow (of each asset)
                    assert (
                        opt_add_cap + inst_cap
                    ) * data[ENERGY_PRODUCTION][prod_asset][TIMESERIES].sum(
                    ) == approx(
                        data[ENERGY_PRODUCTION][prod_asset][TOTAL_FLOW][VALUE],
                        rel=1e-3
                    ), f"The sum of the power output timeseries * total capacity chosen of {prod_asset} should be equal to calculated total flow of the asset, but this is not the case."
# Ejemplo n.º 30 — 0
    def test_benchmark_Economic_KPI_C2_E2(self, margs):
        r"""
        Notes
        -----
        With this benchmark test, we evaluate the performance of the economic pre- and post-processing in C2 and E2.
        Values that have to be compared for each asset
        - LIFETIME_SPECIFIC_COST_OM
        - LIFETIME_PRICE_DISPATCH
        - LIFETIME_SPECIFIC_COST
        - ANNUITY_SPECIFIC_INVESTMENT_AND_OM
        - SIMULATION_ANNUITY
        - SPECIFIC_REPLACEMENT_COSTS_INSTALLED
        - SPECIFIC_REPLACEMENT_COSTS_OPTIMIZED
        - OPTIMIZED_ADD_CAP != 0, as we are not optimizing any asset
        - ANNUITY_OM
        - ANNUITY_TOTAL
        - COST_TOTAL
        - COST_OPERATIONAL_TOTAL
        - COST_OM
        - COST_DISPATCH
        - COST_INVESTMENT
        - COST_UPFRONT
        - COST_REPLACEMENT
        - LCOE_ASSET

        Overall economic values of the project:
        - NPV
        - Annuity

        """
        use_case = "Economic_KPI_C2_E2"

        # Execute the script
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )

        # read json with results file
        data = load_json(
            os.path.join(TEST_OUTPUT_PATH, use_case, JSON_WITH_RESULTS))

        # Read expected values from file. To edit the values, please use the .xls file first and convert the first tab to csv.
        expected_value_file = "test_data_economic_expected_values.csv"
        expected_values = pd.read_csv(
            os.path.join(TEST_INPUT_PATH, use_case, expected_value_file),
            sep=",",
            index_col=0,
        )
        # Define numbers in the csv as int/floats instead of str, but leave row "group" as a string
        groups = expected_values.loc["group"]
        # need to transpose the DataFrame before applying the conversion and retranspose after
        # the conversion because it does not follow the tidy data principle
        # see https://en.wikipedia.org/wiki/Tidy_data for more info
        expected_values = expected_values.T.apply(pd.to_numeric,
                                                  errors="ignore",
                                                  downcast="integer").T
        expected_values.loc["group"] = groups
        expected_values.loc[FLOW] = [0, 0, 0, 0, 0]

        KEYS_TO_BE_EVALUATED = [
            LIFETIME_SPECIFIC_COST_OM,
            LIFETIME_PRICE_DISPATCH,
            LIFETIME_SPECIFIC_COST,
            ANNUITY_SPECIFIC_INVESTMENT_AND_OM,
            SIMULATION_ANNUITY,
            SPECIFIC_REPLACEMENT_COSTS_INSTALLED,
            SPECIFIC_REPLACEMENT_COSTS_OPTIMIZED,
            OPTIMIZED_ADD_CAP,
            COST_INVESTMENT,
            COST_UPFRONT,
            COST_REPLACEMENT,
            COST_OM,
            COST_DISPATCH,
            COST_OPERATIONAL_TOTAL,
            COST_TOTAL,
            ANNUITY_OM,
            ANNUITY_TOTAL,
            LCOE_ASSET,
        ]

        # Derive expected values dependent on actual dispatch of the asset(s)
        for asset in expected_values.columns:
            # determine asset dictionary (special for storages)
            if asset in [INPUT_POWER, OUTPUT_POWER, STORAGE_CAPACITY]:
                asset_data = data[expected_values[asset]
                                  ["group"]]["storage_01"][asset]
            else:
                asset_data = data[expected_values[asset]["group"]][asset]
            # Get dispatch of the assets
            expected_values[asset][FLOW] = asset_data[FLOW]
            # Calculate cost parameters that are dependent on the flow
            expected_values[asset][COST_DISPATCH] = expected_values[asset][
                LIFETIME_PRICE_DISPATCH] * sum(expected_values[asset][FLOW])
            expected_values[asset][COST_OPERATIONAL_TOTAL] = (
                expected_values[asset][COST_DISPATCH] +
                expected_values[asset][COST_OM])
            expected_values[asset][COST_TOTAL] = (
                expected_values[asset][COST_OPERATIONAL_TOTAL] +
                expected_values[asset][COST_INVESTMENT])
            # Process cost
            expected_values[asset][ANNUITY_OM] = (
                expected_values[asset][COST_OPERATIONAL_TOTAL] *
                dict_economic[CRF][VALUE])
            expected_values[asset][ANNUITY_TOTAL] = (
                expected_values[asset][COST_TOTAL] * dict_economic[CRF][VALUE])
            if sum(expected_values[asset][FLOW]) == 0:
                expected_values[asset][LCOE_ASSET] = 0
            else:
                expected_values[asset][LCOE_ASSET] = expected_values[asset][
                    ANNUITY_TOTAL] / sum(expected_values[asset][FLOW])

        # Store to csv to enable manual check, eg. of LCOE_A. Only previously empty rows have been changed.
        expected_values.drop("flow").to_csv(os.path.join(
            TEST_OUTPUT_PATH, use_case, expected_value_file),
                                            sep=",")

        # Check if asset costs were correctly calculated in C2 and E2
        for asset in expected_values.columns:
            # determine asset dictionary (special for storages)
            if asset in [INPUT_POWER, OUTPUT_POWER, STORAGE_CAPACITY]:
                asset_data = data[expected_values[asset]
                                  ["group"]]["storage_01"][asset]
            else:
                asset_data = data[expected_values[asset]["group"]][asset]
            # assertion
            for key in KEYS_TO_BE_EVALUATED:
                assert expected_values[asset][key] == pytest.approx(
                    asset_data[key][VALUE], rel=1e-3
                ), f"Parameter {key} of asset {asset} is not of expected value."