Example #1
0
    def test_benchmark_AD_grid_diesel(self, margs):
        r"""
        Benchmark test for using a diesel generator with the electricity grid. In this benchmark test, the LCOE of the diesel generator is made less than the grid price and so it is solely used to supply the load.
        """
        use_case = "AD_grid_diesel"
        # run the full MVS simulation for this use case from csv inputs
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )

        # read json with results file
        data = load_json(
            os.path.join(TEST_OUTPUT_PATH, use_case,
                         JSON_WITH_RESULTS + JSON_FILE_EXTENSION),
            flag_missing_values=False,
        )

        # make sure LCOE_diesel is less than grid price, so that below test makes sense
        # (uses the ENERGY_PRICE constant for the key, consistently with the
        # other benchmark tests in this file, instead of a hard-coded string)
        assert (data[ENERGY_CONVERSION]["diesel_generator"][LCOE_ASSET][VALUE]
                < data[ENERGY_PROVIDERS]["DSO"][ENERGY_PRICE][VALUE])

        # make sure grid is not used, ie. that diesel generator supplies all demand
        diesel_generator = data[ENERGY_CONVERSION]["diesel_generator"][FLOW]
        demand = data[ENERGY_CONSUMPTION]["demand_01"][FLOW]
        assert sum(diesel_generator) == approx(sum(demand), rel=1e-3)
def dict_values():
    """Load the D0 test input json and redirect its output folder to TEST_OUTPUT_PATH."""
    input_json_path = os.path.join(
        TEST_REPO_PATH, TEST_INPUT_DIRECTORY, "inputs_for_D0", JSON_FNAME
    )
    answer = load_json(input_json_path)
    # point the simulation output at the test output directory
    answer[SIMULATION_SETTINGS][PATH_OUTPUT_FOLDER] = TEST_OUTPUT_PATH
    return answer
 def test_benchmark_AFG_grid_heatpump_heat(self, margs):
     r"""
     Benchmark test for a sector coupled energy system, including electricity and heat demand. A heat pump is used as a sector coupling asset. Both an electricity and heat DSO are present. The electricity tariff is defined as a time series. The heat pump is only used when its cost (energy_price/efficiency) is less than the heat DSO price.
     """
     use_case = "AFG_grid_heatpump_heat"
     # run the full MVS simulation for this use case from csv inputs
     main(
         overwrite=True,
         display_output="warning",
         path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
         input_type=CSV_EXT,
         path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
     )
     # read json with results file
     data = load_json(
         os.path.join(
             TEST_OUTPUT_PATH, use_case, JSON_WITH_RESULTS + JSON_FILE_EXTENSION
         ),
         flag_missing_values=False,
     )
     # read excel sheet with time series
     busses_flow = pd.read_excel(
         os.path.join(TEST_OUTPUT_PATH, use_case, "timeseries_all_busses.xlsx"),
         sheet_name="Heat",
     )
     # create dict with electricity prices
     electricity_price = data[ENERGY_PROVIDERS]["Grid_DSO"][ENERGY_PRICE][
         VALUE
     ].values
     # Human-readable expressions used in the assertion messages below.
     # Fixed: the first string previously had the heat DSO expression appended
     # to it, which duplicated that expression in the rendered message.
     cost_of_using_heatpump = (
         "electricity_price[i] / data[ENERGY_CONVERSION]['heat_pump'][EFFICIENCY][VALUE]"
     )
     cost_of_using_heat_dso = (
         "data[ENERGY_PROVIDERS]['Heat_DSO'][ENERGY_PRICE][VALUE]"
     )
     # compare cost of using heat pump with electricity price to heat price
     for i in range(0, len(electricity_price)):
         if (
             electricity_price[i]
             / data[ENERGY_CONVERSION]["heat_pump"][EFFICIENCY][VALUE]
             > data[ENERGY_PROVIDERS]["Heat_DSO"][ENERGY_PRICE][VALUE]
         ):
             # heat pump marginal cost exceeds the heat DSO tariff:
             # heat demand should be fully supplied by the heat DSO
             assert busses_flow["Heat_DSO_consumption_period"][i] == approx(
                 abs(busses_flow["demand_heat"][i])
             ), f"Even though the marginal costs to use the heat pump are higher than the heat DSO price with {cost_of_using_heatpump} comp. {cost_of_using_heat_dso}, the heat DSO is not solely used for energy supply."
         else:
             # heat pump is the cheaper option:
             # heat demand should be fully supplied by the heat pump
             assert busses_flow["heat_pump"][i] == approx(
                 abs(busses_flow["demand_heat"][i])
             ), f"Even though the marginal costs to use the heat pump are lower than or equal to the heat DSO price with {cost_of_using_heatpump} comp. {cost_of_using_heat_dso}, the heat pump is not solely used for energy supply."
    def test_benchmark_feature_parameters_as_timeseries(self, margs):
        r"""
        Notes
        -----
        This benchmark test checks if a scalar value can be provided as a timeseries within a csv file.
        It also checks whether these timeseries can be provided within a single csv file.
        """
        use_case = "Feature_parameters_as_timeseries"

        # Execute the script
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )

        # read json with results file
        # NOTE(review): other benchmark tests in this file append
        # JSON_FILE_EXTENSION to JSON_WITH_RESULTS here — confirm this
        # extension-less path is intentional.
        data = load_json(
            os.path.join(TEST_OUTPUT_PATH, use_case, JSON_WITH_RESULTS))

        # read csv with expected values of the timeseries
        csv_file = "parameter_timeseries.csv"
        csv_data = pd.read_csv(
            os.path.join(TEST_INPUT_PATH, use_case, TIME_SERIES, csv_file))

        # constant variables
        diesel = "diesel_generator"
        dso = "DSO"
        diesel_efficiency = "diesel_efficiency"
        electricity_price = "electricity_price"

        # every timeseries value provided in the csv must appear unchanged in
        # the simulation result json (efficiency, energy price, dispatch price)
        for k in range(0, len(csv_data[diesel_efficiency])):
            assert data[ENERGY_CONVERSION][diesel][EFFICIENCY][VALUE][
                k] == pytest.approx(csv_data[diesel_efficiency][k], rel=1e-6)
            assert data[ENERGY_PROVIDERS][dso][ENERGY_PRICE][VALUE][
                k] == pytest.approx(csv_data[electricity_price][k], rel=1e-6)
            assert data[ENERGY_PRODUCTION][
                dso +
                DSO_CONSUMPTION][DISPATCH_PRICE][VALUE][k] == pytest.approx(
                    csv_data[electricity_price][k], rel=1e-6)
Example #5
0
def single_param_variation_analysis(
    param_values, json_input, json_path_to_param_value, json_path_to_output_value=None
):
    r"""Run mvs simulations by varying one of the input parameters to assess output's sensitivity

    Parameters
    ----------
    param_values: list of values (type can vary)
        values assigned one after the other to the varied parameter
    json_input: path or dict
        input parameters for the multi-vector simulation
    json_path_to_param_value: tuple or str
        succession of keys which lead the value of the parameter to vary in the json_input dict
        potentially nested structure. The order of keys is to be read from left to right. In the
        case of str, each key should be separated by a `.` or a `,`.
    json_path_to_output_value: tuple of tuple or str, optional
        collection of succession of keys which lead the value of an output parameter of interest in
        the json dict of the simulation's output. The order of keys is to be read from left to
        right. In the case of str, each key should be separated by a `.` or a `,`.

    Returns
    -------
    dict mapping the list of varied parameter values ("parameters") to the matching
    simulation outputs ("outputs")
    """

    # Process the argument json_input based on its type
    if isinstance(json_input, str):
        # load the file if it is a path
        simulation_input = load_json(json_input)
    elif isinstance(json_input, dict):
        # this is already a json variable
        simulation_input = json_input
    else:
        simulation_input = None
        logging.error(
            f"Simulation input `{json_input}` is neither a file path, nor a json dict. "
            f"It can therefore not be processed."
        )

    param_path_tuple = split_nested_path(json_path_to_param_value)
    answer = []
    if simulation_input is not None:
        for param_val in param_values:
            # modify the value of the parameter before running a new simulation
            modified_input = set_nested_value(
                simulation_input, param_val, param_path_tuple
            )
            # run a simulation with the next value of the variable parameter and convert the
            # result to mvs special json type
            # (removed a leftover debug print of the full simulation output)
            sim_output_json = run_simulation(
                modified_input, display_output="error", epa_format=False
            )
            if json_path_to_output_value is None:
                answer.append(sim_output_json)
            else:
                output_parameters = {}
                # for each output parameter path, add the value located under this path in
                # the final json dict; this could also be applied to the full json dict as
                # post-processing
                for output_param_path in json_path_to_output_value:
                    output_param_path = split_nested_path(output_param_path)
                    output_parameters[output_param_path] = get_nested_value(
                        sim_output_json, output_param_path
                    )
                answer.append(output_parameters)

    return {"parameters": param_values, "outputs": answer}
Example #6
0
                            title="Warning Messages",
                            content=insert_log_messages(
                                log_dict=results_json[SIMULATION_RESULTS][LOGS]
                                [WARNINGS]),
                        ),
                        insert_subsection(
                            title="Error Messages",
                            content=insert_log_messages(
                                log_dict=results_json[SIMULATION_RESULTS][LOGS]
                                [ERRORS]),
                        ),
                    ]),
                ],
            ),
        ],
    )
    return app


if __name__ == "__main__":
    # Manual entry point for debugging the report app: load the latest
    # simulation results from the repository output folder and serve the
    # dash app locally.
    from multi_vector_simulator.utils.constants import REPO_PATH, OUTPUT_FOLDER
    from multi_vector_simulator.B0_data_input_json import load_json

    # json results of the last simulation run
    dict_values = load_json(
        os.path.join(REPO_PATH, OUTPUT_FOLDER,
                     JSON_WITH_RESULTS + JSON_FILE_EXTENSION))

    test_app = create_app(dict_values)
    # open_in_browser(test_app)
    test_app.run_server(debug=True)
    def test_benchmark_feature_parameters_as_timeseries(self, margs):
        r"""
        Notes
        -----
        This benchmark test checks if a scalar value can be provided as a timeseries within a csv file.
        It also checks whether these timeseries can be provided within a single csv file.
        """
        use_case = "Feature_parameters_as_timeseries"

        # Execute the script
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )

        # read json with results file
        data = load_json(
            os.path.join(TEST_OUTPUT_PATH, use_case,
                         JSON_WITH_RESULTS + JSON_FILE_EXTENSION),
            flag_missing_values=False,
        )

        # read csv with expected values of the timeseries
        csv_file = "parameter_timeseries.csv"
        csv_data = pd.read_csv(
            os.path.join(TEST_INPUT_PATH, use_case, TIME_SERIES, csv_file))

        # constant variables
        diesel = "diesel_generator"
        dso = "DSO"
        diesel_efficiency = "diesel_efficiency"
        electricity_price = "electricity_price"
        soc_min = "soc_min"

        # a NaN in the demand profile must be parsed as zero demand
        assert (
            data[ENERGY_CONSUMPTION]["demand_01"][TIMESERIES][3] == 0
        ), "The NaN value of the demand profile is not replaced by a 0 value as it should."
        # every timeseries value provided in the csv must appear unchanged in
        # the simulation result json (assertion messages fixed: "than" instead
        # of "then", and the DISPATCH_PRICE check no longer claims to test the
        # feed-in tariff)
        for k in range(0, len(csv_data[diesel_efficiency])):
            assert data[ENERGY_CONVERSION][diesel][EFFICIENCY][VALUE][
                k] == pytest.approx(
                    csv_data[diesel_efficiency][k], rel=1e-6
                ), f"The diesel efficiency has different values than it was defined as with the csv file {csv_file}."
            assert data[ENERGY_PROVIDERS][dso][ENERGY_PRICE][VALUE][
                k] == pytest.approx(
                    csv_data[electricity_price][k], rel=1e-6
                ), f"The energy price has different values than it was defined as with the csv file {csv_file}."
            assert data[ENERGY_PRODUCTION][
                dso +
                DSO_CONSUMPTION][DISPATCH_PRICE][VALUE][k] == pytest.approx(
                    csv_data[electricity_price][k], rel=1e-6
                ), f"The dispatch price of the DSO consumption source has different values than it was defined as with the csv file {csv_file}."
            if k == 0 or k == 1:
                # the first two soc_min entries are NaN in the csv and must be parsed as 0
                assert (
                    data[ENERGY_STORAGE]["storage_01"][STORAGE_CAPACITY]
                    [SOC_MIN][VALUE][k] == 0
                ), "The NaN value of the soc min timeseries is not parsed as 0 as it should."
            else:
                assert data[ENERGY_STORAGE]["storage_01"][STORAGE_CAPACITY][
                    SOC_MIN][VALUE][k] == pytest.approx(
                        csv_data[soc_min][k], rel=1e-6
                    ), f"The soc min has different values than it was defined as with the csv file {csv_file}."
Example #8
0
    def test_renewable_factor_and_renewable_share_of_local_generation(self, margs):
        r"""
        Benchmark test that checks the calculation of
        * TOTAL_NON_RENEWABLE_GENERATION_IN_LES
        * TOTAL_RENEWABLE_GENERATION_IN_LES
        * TOTAL_NON_RENEWABLE_ENERGY_USE
        * TOTAL_RENEWABLE_ENERGY_USE
        * RENEWABLE_FACTOR
        * RENEWABLE_SHARE_OF_LOCAL_GENERATION
        For one sector, with only grid and PV present. Uses the simple scenarios for MVS testing as an input.
        """
        use_case = "AB_grid_PV"
        # run the full MVS simulation for this use case from csv inputs
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )
        # Check for RENEWABLE_FACTOR and RENEWABLE_SHARE_OF_LOCAL_GENERATION:
        data = load_json(
            os.path.join(
                TEST_OUTPUT_PATH, use_case, JSON_WITH_RESULTS + JSON_FILE_EXTENSION
            ),
            flag_missing_values=False,
        )

        # Get total flow of PV
        total_res_local = data[ENERGY_PRODUCTION]["pv_plant_01"][TOTAL_FLOW][VALUE]
        # the DSO consumption source is named after the provider label plus a suffix
        dso_consumption_source = (
            data[ENERGY_PROVIDERS]["Electricity_grid_DSO"][LABEL] + DSO_CONSUMPTION
        )
        total_supply_dso = data[ENERGY_PRODUCTION][dso_consumption_source][TOTAL_FLOW][
            VALUE
        ]

        # PV is the only local renewable generation asset in this scenario, and the
        # DSO supply is the only non-renewable energy source
        assert (
            data[KPI][KPI_SCALARS_DICT][TOTAL_RENEWABLE_GENERATION_IN_LES]
            == total_res_local
        ), f"The total renewable generation is not equal to the generation of the PV system."
        assert (
            data[KPI][KPI_SCALARS_DICT][TOTAL_NON_RENEWABLE_GENERATION_IN_LES] == 0
        ), f"There is no local non-renewable generation asset, but there seems to be a non-renewable production."
        assert (
            data[KPI][KPI_SCALARS_DICT][TOTAL_RENEWABLE_ENERGY_USE] == total_res_local
        ), f"There is another renewable energy source apart from PV."
        assert (
            data[KPI][KPI_SCALARS_DICT][TOTAL_NON_RENEWABLE_ENERGY_USE]
            == total_supply_dso
        ), "The non-renewable energy use was expected to be all grid supply, but this does not hold true."
        # renewable factor = renewable use / total use, both system-wide and per sector
        assert data[KPI][KPI_SCALARS_DICT][RENEWABLE_FACTOR] == total_res_local / (
            total_res_local + total_supply_dso
        ), f"The {RENEWABLE_FACTOR} is not as expected."
        assert data[KPI][KPI_UNCOUPLED_DICT].loc[
            RENEWABLE_FACTOR, "Electricity"
        ] == pytest.approx(
            total_res_local / (total_res_local + total_supply_dso)
        ), f"The {RENEWABLE_FACTOR} is not as expected."
        # with PV as the only local generation, the local renewable share must be 1
        assert (
            data[KPI][KPI_SCALARS_DICT][RENEWABLE_SHARE_OF_LOCAL_GENERATION] == 1
        ), f"The {RENEWABLE_SHARE_OF_LOCAL_GENERATION} is not as expected."
        assert (
            data[KPI][KPI_UNCOUPLED_DICT].loc[
                RENEWABLE_SHARE_OF_LOCAL_GENERATION, "Electricity"
            ]
            == 1
        ), f"The {RENEWABLE_SHARE_OF_LOCAL_GENERATION} is not as expected."
Example #9
0
    def test_benchmark_Economic_KPI_C2_E2(self, margs):
        r"""
        Notes
        -----
        With this benchmark test, we evaluate the performance of the economic pre- and post-processing in C2 and E2.
        Values that have to be compared for each asset
        - LIFETIME_SPECIFIC_COST_OM
        - LIFETIME_PRICE_DISPATCH
        - LIFETIME_SPECIFIC_COST
        - ANNUITY_SPECIFIC_INVESTMENT_AND_OM
        - SIMULATION_ANNUITY
        - SPECIFIC_REPLACEMENT_COSTS_INSTALLED
        - SPECIFIC_REPLACEMENT_COSTS_OPTIMIZED
        - OPTIMIZED_ADD_CAP != 0, as we are not optimizing any asset
        - ANNUITY_OM
        - ANNUITY_TOTAL
        - COST_TOTAL
        - COST_OPERATIONAL_TOTAL
        - COST_OM
        - COST_DISPATCH
        - COST_INVESTMENT
        - COST_UPFRONT
        - COST_REPLACEMENT
        - LCOE_ASSET

        Overall economic values of the project:
        - NPV
        - Annuity

        """

        # Execute the script
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, USE_CASE),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, USE_CASE),
        )

        # read json with results file
        data = load_json(
            os.path.join(
                TEST_OUTPUT_PATH, USE_CASE, JSON_WITH_RESULTS + JSON_FILE_EXTENSION
            ),
            flag_missing_values=False,
        )

        # Read expected values from file.
        expected_value_file = "test_data_economic_expected_values.csv"
        expected_values = pd.read_csv(
            os.path.join(TEST_INPUT_PATH, USE_CASE, expected_value_file),
            sep=",",
            index_col=0,
        )

        # economic parameters that are compared asset-by-asset against the csv
        KEYS_TO_BE_EVALUATED_PER_ASSET = [
            LIFETIME_SPECIFIC_COST_OM,
            LIFETIME_PRICE_DISPATCH,
            LIFETIME_SPECIFIC_COST,
            ANNUITY_SPECIFIC_INVESTMENT_AND_OM,
            SIMULATION_ANNUITY,
            SPECIFIC_REPLACEMENT_COSTS_INSTALLED,
            SPECIFIC_REPLACEMENT_COSTS_OPTIMIZED,
            OPTIMIZED_ADD_CAP,
            COST_INVESTMENT,
            COST_UPFRONT,
            COST_REPLACEMENT,
            COST_OM,
            COST_DISPATCH,
            COST_OPERATIONAL_TOTAL,
            COST_TOTAL,
            ANNUITY_OM,
            ANNUITY_TOTAL,
            LCOE_ASSET,
        ]

        # Compare asset costs calculated in C2 and E2 with benchmark data from csv file
        for asset in expected_values.index:

            asset_group = expected_values.loc[asset, "group"]

            # determine asset dictionary (special for storages)
            if asset in [INPUT_POWER, OUTPUT_POWER, STORAGE_CAPACITY]:
                asset_data = data[asset_group]["storage_01"][asset]
            else:
                asset_data = data[asset_group][asset]
            # assertion
            for key in KEYS_TO_BE_EVALUATED_PER_ASSET:
                assert (
                    key in asset_data
                ), f"{key} is not in the asset data of {asset_group}, {asset}. It includes: {asset_data.keys()}."
                assert expected_values.loc[asset, key] == pytest.approx(
                    asset_data[key][VALUE], rel=1e-3
                ), f"Parameter {key} of asset {asset} is not of expected value, expected {expected_values.loc[asset, key]}, got {asset_data[key][VALUE]}."

        # Now we established that the externally calculated values are equal to the internally calculated values.
        # Therefore, we can now use the cost data from the assets to validate the cost data for the whole energy system.

        demand = pd.read_csv(
            os.path.join(TEST_INPUT_PATH, USE_CASE, TIME_SERIES, "demand.csv"), sep=",",
        )
        # single demand column: its sum is the total consumed energy
        aggregated_demand = demand.sum()[0]

        # accumulator for the system-wide cost KPI, summed over all assets below
        KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM = {
            COST_INVESTMENT: 0,
            COST_UPFRONT: 0,
            COST_REPLACEMENT: 0,
            COST_OM: 0,
            COST_DISPATCH: 0,
            COST_OPERATIONAL_TOTAL: 0,
            COST_TOTAL: 0,
            ANNUITY_OM: 0,
            ANNUITY_TOTAL: 0,
        }

        def add_to_key(KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM, asset_data):
            """
            Add individual cost to each of the separate costs.

            Parameters
            ----------
            KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM: dict
                dict of keys to be evaluated for system costs, updated in place
            asset_data: dict
                Asset data with economic parameters

            Returns
            -------
            None; KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM is mutated in place
            """
            for key in KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM:
                KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM.update(
                    {
                        key: KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM[key]
                        + asset_data[key][VALUE]
                    }
                )

        for asset_group in (
            ENERGY_CONSUMPTION,
            ENERGY_CONVERSION,
            ENERGY_PRODUCTION,
            ENERGY_STORAGE,
        ):
            for asset in data[asset_group]:
                # for storage we look at the annuity of the in and out flows and storage capacity
                if asset_group == ENERGY_STORAGE:
                    for storage_type in [INPUT_POWER, OUTPUT_POWER, STORAGE_CAPACITY]:
                        asset_data = data[asset_group][asset][storage_type]
                        add_to_key(KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM, asset_data)
                else:
                    asset_data = data[asset_group][asset]
                    add_to_key(KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM, asset_data)

        # the per-asset sums must match the system-wide KPI scalars
        for key in KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM:
            assert KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM[key] == pytest.approx(
                data[KPI][KPI_SCALARS_DICT][key], rel=1e-3
            ), f"The key {key} is not of expected value {KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM[key]} but {data[KPI][KPI_SCALARS_DICT][key]}. This is based on the before established assertion, that the expected values of asset costs are equal to the ones in the json results file."

        # Compute the lcoe for this simple case from the data (single demand)
        lcoe = KEYS_TO_BE_EVALUATED_FOR_TOTAL_SYSTEM[ANNUITY_TOTAL] / aggregated_demand
        mvs_lcoe = data[KPI][KPI_SCALARS_DICT][LCOeleq]
        assert lcoe == pytest.approx(
            mvs_lcoe, rel=1e-3
        ), f"Parameter {LCOE_ASSET} of system is not of expected value (benchmark of {lcoe} versus computed value of {mvs_lcoe}."

        # attributed costs over all sectors must sum up to the total system cost
        attributed_costs = 0
        for key in data[KPI][KPI_SCALARS_DICT]:
            if ATTRIBUTED_COSTS in key:

                attributed_costs += data[KPI][KPI_SCALARS_DICT][key]
        assert (
            attributed_costs == data[KPI][KPI_SCALARS_DICT][COST_TOTAL]
        ), f"The total attributed costs are not the costs of the total system."
Example #10
0
def process_expected_values():
    """
    Processes expected values from `test_data_economic_expected_values.csv`.

    Derive expected values dependent on actual dispatch of the asset(s)
    for asset in expected_values.columns:


    Returns
    -------
    Save expected values to `expected_value_file`, to be used in benchmark tests
    """
    # To edit the values, please use the test_data_economic_expected_values.xls file first and convert the first tab to csv.
    expected_value_file = "test_data_economic_expected_values.csv"
    expected_values = pd.read_csv(
        os.path.join(TEST_INPUT_PATH, USE_CASE, expected_value_file),
        sep=",",
        index_col=0,
    )

    # read json with results file
    data = load_json(
        os.path.join(
            TEST_OUTPUT_PATH, USE_CASE, JSON_WITH_RESULTS + JSON_FILE_EXTENSION
        )
    )

    # NOTE(review): this loop iterates `expected_values.index` but then uses
    # column-style access `expected_values[asset][...]`; the sibling version of
    # this logic iterates `expected_values.columns` with the same csv. Confirm
    # the orientation of the DataFrame (assets as rows vs. columns) is correct
    # here.
    for asset in expected_values.index:

        # determine asset dictionary (special for storages)
        result_key = expected_values[asset]["group"]

        if asset in [INPUT_POWER, OUTPUT_POWER, STORAGE_CAPACITY]:
            asset_data = data[result_key]["storage_01"][asset]
        else:
            asset_data = data[result_key][asset]

        # get dispatch of the assets
        # NOTE(review): `expected_values[asset][FLOW] = ...` is pandas chained
        # assignment — presumably it writes through to the DataFrame here, but
        # this is not guaranteed in newer pandas (copy-on-write); verify.
        expected_values[asset][FLOW] = asset_data[FLOW]

        # calculate cost parameters that are dependent on the flow
        expected_values[asset][COST_DISPATCH] = expected_values[asset][
            LIFETIME_PRICE_DISPATCH
        ] * sum(expected_values[asset][FLOW])
        expected_values[asset][COST_OPERATIONAL_TOTAL] = (
            expected_values[asset][COST_DISPATCH] + expected_values[asset][COST_OM]
        )
        expected_values[asset][COST_TOTAL] = (
            expected_values[asset][COST_OPERATIONAL_TOTAL]
            + expected_values[asset][COST_INVESTMENT]
        )

        # process cost
        expected_values[asset][ANNUITY_OM] = (
            expected_values[asset][COST_OPERATIONAL_TOTAL] * DICT_ECONOMIC[CRF][VALUE]
        )
        expected_values[asset][ANNUITY_TOTAL] = (
            expected_values[asset][COST_TOTAL] * DICT_ECONOMIC[CRF][VALUE]
        )
        # an asset that never dispatches has no meaningful LCOE; set it to 0
        if sum(expected_values[asset][FLOW]) == 0:
            expected_values[asset][LCOE_ASSET] = 0
        else:
            expected_values[asset][LCOE_ASSET] = expected_values[asset][
                ANNUITY_TOTAL
            ] / sum(expected_values[asset][FLOW])

    # store to csv to enable manual check, eg. of lcoe_a. only previously empty rows have been changed.
    expected_values.drop("flow").to_csv(
        os.path.join(TEST_OUTPUT_PATH, USE_CASE, expected_value_file), sep=","
    )
Example #11
0
    def test_benchmark_Economic_KPI_C2_E2(self, margs):
        r"""
        Notes
        -----
        With this benchmark test, we evaluate the performance of the economic pre- and post-processing in C2 and E2.
        Values that have to be compared for each asset
        - LIFETIME_SPECIFIC_COST_OM
        - LIFETIME_PRICE_DISPATCH
        - LIFETIME_SPECIFIC_COST
        - ANNUITY_SPECIFIC_INVESTMENT_AND_OM
        - SIMULATION_ANNUITY
        - SPECIFIC_REPLACEMENT_COSTS_INSTALLED
        - SPECIFIC_REPLACEMENT_COSTS_OPTIMIZED
        - OPTIMIZED_ADD_CAP != 0, as we are not optimizing any asset
        - ANNUITY_OM
        - ANNUITY_TOTAL
        - COST_TOTAL
        - COST_OPERATIONAL_TOTAL
        - COST_OM
        - COST_DISPATCH
        - COST_INVESTMENT
        - COST_UPFRONT
        - COST_REPLACEMENT
        - LCOE_ASSET

        Overall economic values of the project:
        - NPV
        - Annuity

        """
        use_case = "Economic_KPI_C2_E2"

        # Execute the script
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )

        # read json with results file
        data = load_json(
            os.path.join(TEST_OUTPUT_PATH, use_case, JSON_WITH_RESULTS))

        # Read expected values from file. To edit the values, please use the .xls file first and convert the first tab to csv.
        expected_value_file = "test_data_economic_expected_values.csv"
        expected_values = pd.read_csv(
            os.path.join(TEST_INPUT_PATH, use_case, expected_value_file),
            sep=",",
            index_col=0,
        )
        # Define numbers in the csv as int/floats instead of str, but leave row "group" as a string
        groups = expected_values.loc["group"]
        # need to transpose the DataFrame before applying the conversion and retranspose after
        # the conversion because it does not follow the tidy data principle
        # see https://en.wikipedia.org/wiki/Tidy_data for more info
        # NOTE(review): `errors="ignore"` for pd.to_numeric is deprecated in
        # recent pandas versions — confirm the pinned pandas version supports it.
        expected_values = expected_values.T.apply(pd.to_numeric,
                                                  errors="ignore",
                                                  downcast="integer").T
        expected_values.loc["group"] = groups
        # placeholder row for the asset flows, filled below from the results
        expected_values.loc[FLOW] = [0, 0, 0, 0, 0]

        # economic parameters that are compared asset-by-asset against the csv
        KEYS_TO_BE_EVALUATED = [
            LIFETIME_SPECIFIC_COST_OM,
            LIFETIME_PRICE_DISPATCH,
            LIFETIME_SPECIFIC_COST,
            ANNUITY_SPECIFIC_INVESTMENT_AND_OM,
            SIMULATION_ANNUITY,
            SPECIFIC_REPLACEMENT_COSTS_INSTALLED,
            SPECIFIC_REPLACEMENT_COSTS_OPTIMIZED,
            OPTIMIZED_ADD_CAP,
            COST_INVESTMENT,
            COST_UPFRONT,
            COST_REPLACEMENT,
            COST_OM,
            COST_DISPATCH,
            COST_OPERATIONAL_TOTAL,
            COST_TOTAL,
            ANNUITY_OM,
            ANNUITY_TOTAL,
            LCOE_ASSET,
        ]

        # Derive expected values dependent on actual dispatch of the asset(s)
        for asset in expected_values.columns:
            # determine asset dictionary (special for storages)
            if asset in [INPUT_POWER, OUTPUT_POWER, STORAGE_CAPACITY]:
                asset_data = data[expected_values[asset]
                                  ["group"]]["storage_01"][asset]
            else:
                asset_data = data[expected_values[asset]["group"]][asset]
            # Get dispatch of the assets
            # NOTE(review): `expected_values[asset][...] = ...` is pandas
            # chained assignment — presumably it writes through to the
            # DataFrame here, but this is not guaranteed in newer pandas
            # (copy-on-write); verify with the pinned pandas version.
            expected_values[asset][FLOW] = asset_data[FLOW]
            # Calculate cost parameters that are dependent on the flow
            expected_values[asset][COST_DISPATCH] = expected_values[asset][
                LIFETIME_PRICE_DISPATCH] * sum(expected_values[asset][FLOW])
            expected_values[asset][COST_OPERATIONAL_TOTAL] = (
                expected_values[asset][COST_DISPATCH] +
                expected_values[asset][COST_OM])
            expected_values[asset][COST_TOTAL] = (
                expected_values[asset][COST_OPERATIONAL_TOTAL] +
                expected_values[asset][COST_INVESTMENT])
            # Process cost
            expected_values[asset][ANNUITY_OM] = (
                expected_values[asset][COST_OPERATIONAL_TOTAL] *
                dict_economic[CRF][VALUE])
            expected_values[asset][ANNUITY_TOTAL] = (
                expected_values[asset][COST_TOTAL] * dict_economic[CRF][VALUE])
            # an asset that never dispatches has no meaningful LCOE; set it to 0
            if sum(expected_values[asset][FLOW]) == 0:
                expected_values[asset][LCOE_ASSET] = 0
            else:
                expected_values[asset][LCOE_ASSET] = expected_values[asset][
                    ANNUITY_TOTAL] / sum(expected_values[asset][FLOW])

        # Store to csv to enable manual check, eg. of LCOE_A. Only previously empty rows have been changed.
        expected_values.drop("flow").to_csv(os.path.join(
            TEST_OUTPUT_PATH, use_case, expected_value_file),
                                            sep=",")

        # Check if asset costs were correctly calculated in C2 and E2
        for asset in expected_values.columns:
            # determine asset dictionary (special for storages)
            if asset in [INPUT_POWER, OUTPUT_POWER, STORAGE_CAPACITY]:
                asset_data = data[expected_values[asset]
                                  ["group"]]["storage_01"][asset]
            else:
                asset_data = data[expected_values[asset]["group"]][asset]
            # assertion
            for key in KEYS_TO_BE_EVALUATED:
                assert expected_values[asset][key] == pytest.approx(
                    asset_data[key][VALUE], rel=1e-3
                ), f"Parameter {key} of asset {asset} is not of expected value."
Example #12
0
    def test_benchmark_AE_grid_battery_peak_pricing(self, margs):
        r"""
        Benchmark test for electricity grid peak demand pricing. To evaluate this, a battery is used. The battery should be charged at instances before the grid supplies peak demand. The battery is discharged when demand is higher than peak demand and charged when demand is smaller than peak demand.
        """
        use_case = "AE_grid_battery_peak_pricing"
        # Run the full simulation for the benchmark input folder.
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )
        # read json with results file
        data = load_json(
            os.path.join(TEST_OUTPUT_PATH, use_case,
                         JSON_WITH_RESULTS + JSON_FILE_EXTENSION),
            flag_missing_values=False,
        )
        # Optimized additional capacity of each of the three DSO peak demand
        # pricing periods.
        # Fix: the third entry must be read from consumption_period_3 — the
        # original code read consumption_period_2 twice, so the period-3
        # assertions below compared flows against the wrong peak capacity.
        peak_demand = [
            data[ENERGY_CONVERSION][
                f"Electricity grid DSO_consumption_period_{period}"
            ][OPTIMIZED_ADD_CAP][VALUE]
            for period in (1, 2, 3)
        ]
        # read timeseries_all_busses excel file
        busses_flow = pd.read_excel(
            os.path.join(TEST_OUTPUT_PATH, use_case,
                         "timeseries_all_busses.xlsx"),
            sheet_name="Electricity",
        )
        # make the time the index
        busses_flow = busses_flow.set_index("Unnamed: 0")
        # read the columns with the values to be used
        DSO_periods = [
            busses_flow[f"Electricity grid DSO_consumption_period_{period}"]
            for period in (1, 2, 3)
        ]
        demand = busses_flow["demand_01"]
        battery_charge = busses_flow[f"battery {INPUT_POWER}"]
        battery_discharge = busses_flow[f"battery {OUTPUT_POWER}"]

        # look for peak demand in each pricing period
        for j in range(3):
            # Fix: iterate over the length of the period actually being
            # evaluated (the original always used len(DSO_periods[1])).
            for i in range(len(DSO_periods[j])):
                # When the DSO is supplying peak demand while demand is smaller than supplied electricity.
                # Then, the battery is charged.
                if (DSO_periods[j][i] == peak_demand[j]
                        and abs(demand[i]) < DSO_periods[j][i]):
                    assert abs(battery_charge[i]) > 0
                # When DSO supplies peak demand and demand is larger then the peak demand,
                # Then, the battery has to be discharged
                if (DSO_periods[j][i] == peak_demand[j]
                        and abs(demand[i]) > DSO_periods[j][i]):
                    assert abs(battery_discharge[i]) > 0
                # If DSO supplies peak demand and the demand is larger then the supply,
                # then, in the previous timestep the battery must be charged,
                # as long as in the previous timestep the demand was smaller then the supply.
                if (DSO_periods[j][i] == peak_demand[j]
                        and abs(demand[i]) > DSO_periods[j][i]
                        and DSO_periods[j][i - 1] > abs(demand[i - 1])):
                    assert abs(battery_charge[i - 1]) > 0
Example #13
0
    def test_benchmark_AE_grid_battery(self, margs):
        r"""
        Benchmark test for simple case grid and battery scenario. The grid should solely be used to feed the load.
        """
        use_case = "AE_grid_battery"
        # Run the full simulation for the benchmark input folder.
        main(
            overwrite=True,
            display_output="warning",
            path_input_folder=os.path.join(TEST_INPUT_PATH, use_case),
            input_type=CSV_EXT,
            path_output_folder=os.path.join(TEST_OUTPUT_PATH, use_case),
        )

        # read json with results file
        data = load_json(
            os.path.join(TEST_OUTPUT_PATH, use_case,
                         JSON_WITH_RESULTS + JSON_FILE_EXTENSION))

        # Names of the two storage assets expected in the benchmark inputs.
        No_optimize_no_cap_in_out = "No_optimize_no_cap_in_out"
        No_optimize_with_cap_in_out = "No_optimize_with_cap_in_out"

        # Expected properties of each storage asset; used both to validate
        # that the input files were not changed and to build failure messages.
        description = "description"
        implemented_storage_assets = {
            No_optimize_no_cap_in_out: {
                description:
                "Storage asset with a set storage capacity but no input or output power capacity, not to be optimized.",
                OPTIMIZE_CAP: False,
            },
            No_optimize_with_cap_in_out: {
                description:
                "Storage asset with a set storage capacity as well as set input or output power capacity, not to be optimized.",
                OPTIMIZE_CAP: False,
            },
        }

        for storage_asset in data[ENERGY_STORAGE].keys():
            # Assertions that validate that the input files have not been changed.
            assert (
                storage_asset in implemented_storage_assets
            ), f"The defined storage asset {storage_asset} is not expected. It should be one of the assets {implemented_storage_assets.keys()}. It should be {implemented_storage_assets[storage_asset][description]}"
            exp_optimize = implemented_storage_assets[storage_asset][
                OPTIMIZE_CAP]
            res_optimize = data[ENERGY_STORAGE][storage_asset][OPTIMIZE_CAP][
                VALUE]
            assert (
                res_optimize == exp_optimize
            ), f"The {OPTIMIZE_CAP} of storage asset {storage_asset} should be {exp_optimize}, but is {res_optimize}. "

            if storage_asset == No_optimize_no_cap_in_out:
                for sub_item in [INPUT_POWER, OUTPUT_POWER, STORAGE_CAPACITY]:
                    # Assertions that validate that the input files are correct
                    if sub_item == STORAGE_CAPACITY:
                        assert (
                            data[ENERGY_STORAGE][storage_asset][sub_item]
                            [INSTALLED_CAP][VALUE] > 0
                        ), f"For this storage asset {storage_asset} the {INSTALLED_CAP} or {sub_item} should be > 0, as {implemented_storage_assets[storage_asset][description]}"
                        # Assertion that checks if flows are as expected
                        # (fixed typo in message: "though" -> "through")
                        res = data[ENERGY_STORAGE][storage_asset][sub_item][
                            TOTAL_FLOW][VALUE]
                        assert (
                            res is None
                        ), f"With no input/output power capacities, storage asset {storage_asset} should have no flow through the {sub_item}."

                    else:
                        assert (
                            data[ENERGY_STORAGE][storage_asset][sub_item]
                            [INSTALLED_CAP][VALUE] == 0
                        ), f"For this storage asset {storage_asset} the {INSTALLED_CAP} or {sub_item} should be == 0, as {implemented_storage_assets[storage_asset][description]}."
                        # Assertion that checks if flows are as expected
                        res = data[ENERGY_STORAGE][storage_asset][sub_item][
                            TOTAL_FLOW][VALUE]
                        assert (
                            res == 0
                        ), f"With no input/output power capacities, storage asset {storage_asset} should have 0 flow through the {sub_item}."

            if storage_asset == No_optimize_with_cap_in_out:
                for sub_item in [INPUT_POWER, OUTPUT_POWER, STORAGE_CAPACITY]:
                    # Assertions that validate that the input files are correct
                    assert (
                        data[ENERGY_STORAGE][storage_asset][sub_item]
                        [INSTALLED_CAP][VALUE] > 0
                    ), f"For this storage asset {storage_asset} the {INSTALLED_CAP} or {sub_item} should be > 0, as {implemented_storage_assets[storage_asset][description]}."

                    # Assertion that checks if flows are as expected
                    res = data[ENERGY_STORAGE][storage_asset][sub_item][
                        TOTAL_FLOW][VALUE]
                    if sub_item == STORAGE_CAPACITY:
                        assert (
                            res is None
                        ), f"With input/output power capacities, storage asset {storage_asset} does have a timeseries, but as the stored energy in a timestep is not a flow, it does not have a {TOTAL_FLOW}."
                    else:
                        # (fixed grammar/typo in message: "an flow though" -> "a flow through")
                        assert (
                            res >= 0
                        ), f"With input/output power capacities, storage asset {storage_asset} can have a flow through the {sub_item}, ie. {TOTAL_FLOW} can be >=0. Its value, though, is {res}."

            # Every storage asset must also get a state-of-charge timeseries.
            assert (
                TIMESERIES_SOC in data[ENERGY_STORAGE][storage_asset]
            ), f"The {TIMESERIES_SOC} of {storage_asset} was not calculated."
Example #14
0
                        insert_subsection(
                            title="Warning Messages",
                            content=insert_log_messages(
                                log_dict=results_json[SIMULATION_RESULTS][LOGS]
                                [WARNINGS]),
                        ),
                        insert_subsection(
                            title="Error Messages",
                            content=insert_log_messages(
                                log_dict=results_json[SIMULATION_RESULTS][LOGS]
                                [ERRORS]),
                        ),
                    ]),
                ],
            ),
        ],
    )
    return app


if __name__ == "__main__":
    from multi_vector_simulator.utils.constants import REPO_PATH, OUTPUT_FOLDER
    from multi_vector_simulator.B0_data_input_json import load_json

    dict_values = load_json(
        os.path.join(REPO_PATH, OUTPUT_FOLDER, JSON_WITH_RESULTS))

    test_app = create_app(dict_values)
    # open_in_browser(test_app)
    test_app.run_server(debug=True)