def test_check_input_path_posix():
    """Verify the code works on both Windows and Linux path systems"""
    if os.name == "posix":
        folder = A0.check_input_folder("{}/inputs/".format(TEST_REPO_PATH), JSON_EXT)
    else:
        folder = A0.check_input_folder("{}\\inputs\\".format(TEST_REPO_PATH), JSON_EXT)
    assert folder == os.path.join(JSON_PATH)
def test_if_f_opt_preexisting_path_output_folder_should_be_replaced(self, m_args):
    if os.path.exists(self.test_out_path) is False:
        os.mkdir(self.test_out_path)
    some_file = os.path.join(self.test_out_path, "a_file.txt")
    with open(some_file, "w") as of:
        of.write("something")
    A0.process_user_arguments()
    assert os.path.exists(some_file) is False
def test_if_json_opt_and_more_than_one_json_file_in_input_folder_raise_fileexists_error(
    self, m_args
):
    # create a folder with two json files
    os.mkdir(self.fake_input_path)
    with open(os.path.join(self.fake_input_path, JSON_FNAME), "w") as of:
        of.write("something")
    with open(os.path.join(self.fake_input_path, "file2.json"), "w") as of:
        of.write("something")
    with pytest.raises(FileExistsError):
        A0.process_user_arguments()
def test_load_json_copies_json_file_to_output_folder(self, m_args):
    A0.process_user_arguments()
    A1.create_input_json(input_directory=CSV_PATH, pass_back=True)
    dict_values = B0.load_json(
        JSON_CSV_PATH, path_output_folder=self.test_out_path, move_copy=True
    )
    assert (
        os.path.exists(
            os.path.join(
                dict_values[SIMULATION_SETTINGS][PATH_OUTPUT_FOLDER_INPUTS],
                CSV_FNAME,
            )
        )
        is True
    )
def setup_module(m_args):
    """Run the simulation up to module E0 and save dict_values before and after evaluation"""
    if os.path.exists(TEST_OUTPUT_PATH):
        shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True)
    user_input = A0.process_user_arguments()

    logging.debug("Accessing script: B0_data_input_json")
    dict_values = B0.load_json(
        user_input[PATH_INPUT_FILE],
        path_input_folder=user_input[PATH_INPUT_FOLDER],
        path_output_folder=user_input[PATH_OUTPUT_FOLDER],
        move_copy=False,
    )

    logging.debug("Accessing script: C0_data_processing")
    C0.all(dict_values)

    logging.debug("Accessing script: D0_modelling_and_optimization")
    results_meta, results_main = D0.run_oemof(dict_values)

    # Pickle dict_values before the evaluation in E0
    with open(DICT_BEFORE, "wb") as handle:
        pickle.dump(dict_values, handle, protocol=pickle.HIGHEST_PROTOCOL)

    logging.debug("Accessing script: E0_evaluation")
    E0.evaluate_dict(dict_values, results_main, results_meta)

    # Pickle dict_values after the evaluation in E0
    with open(DICT_AFTER, "wb") as handle:
        pickle.dump(dict_values, handle, protocol=pickle.HIGHEST_PROTOCOL)
def setup_class(m_args):
    """Run the simulation up to module E0 and prepare bus_data for E1"""
    if os.path.exists(TEST_OUTPUT_PATH):
        shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True)

    logging.debug("Accessing script: A0_initialization")
    user_input = A0.process_user_arguments()

    A1.create_input_json(
        input_directory=os.path.join(user_input[PATH_INPUT_FOLDER], CSV_ELEMENTS)
    )

    logging.debug("Accessing script: B0_data_input_json")
    dict_values = B0.load_json(
        user_input[PATH_INPUT_FILE],
        path_input_folder=user_input[PATH_INPUT_FOLDER],
        path_output_folder=user_input[PATH_OUTPUT_FOLDER],
        move_copy=True,
        set_default_values=True,
    )

    logging.debug("Accessing script: C0_data_processing")
    C0.all(dict_values)

    logging.debug("Accessing script: D0_modelling_and_optimization")
    results_meta, results_main = D0.run_oemof(dict_values)

    # Store all information related to busses in bus_data
    bus_data = {}
    for bus in dict_values[ENERGY_BUSSES]:
        # Read all energy flows from busses
        bus_data.update({bus: solph.views.node(results_main, bus)})

    # Pickle dump bus data
    with open(BUS_DATA_DUMP, "wb") as handle:
        pickle.dump(bus_data, handle, protocol=pickle.HIGHEST_PROTOCOL)
def test_if_csv_opt_path_input_file_set_to_path_input_folder_mvs_csv_config_dot_json(
    self, m_args, tmpdir
):
    """Check that the path_input_file is <path_input_folder>/csv_elements/mvs_csv_config.json"""
    os.mkdir(self.fake_input_path)
    os.mkdir(os.path.join(self.fake_input_path, CSV_ELEMENTS))
    user_inputs = A0.process_user_arguments()
    assert user_inputs[PATH_INPUT_FILE] == os.path.join(
        self.fake_input_path, CSV_ELEMENTS, CSV_FNAME
    )
def test_load_json_removes_json_file_from_inputs_folder(self, m_args):
    A0.process_user_arguments()
    A1.create_input_json(input_directory=CSV_PATH, pass_back=True)
    dict_values = B0.load_json(
        JSON_CSV_PATH, path_output_folder=self.test_out_path, move_copy=True
    )
    assert os.path.exists(os.path.join(CSV_PATH, CSV_ELEMENTS, CSV_FNAME)) is False
    assert os.path.exists(os.path.join(CSV_PATH, CSV_FNAME)) is False
def run_parts(margs):
    if os.path.exists(TEST_OUTPUT_PATH):
        shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True)
    user_input = A0.process_user_arguments()

    logging.debug("Accessing script: B0_data_input_json")
    dict_values = B0.load_json(
        user_input[PATH_INPUT_FILE],
        path_input_folder=user_input[PATH_INPUT_FOLDER],
        path_output_folder=user_input[PATH_OUTPUT_FOLDER],
        move_copy=False,
    )

    logging.debug("Accessing script: C0_data_processing")
    C0.all(dict_values)

    logging.debug("Run parts of D0_modelling_and_optimization")
    model, dict_model = D0.model_building.initialize(dict_values)
    model = D0.model_building.adding_assets_to_energysystem_model(
        dict_values, dict_model, model
    )
    return dict_values, model, dict_model
        },
    }
    B0.retrieve_date_time_info(simulation_settings)
    for k in (START_DATE, END_DATE, TIME_INDEX):
        assert (
            k in simulation_settings.keys()
        ), f"Function does not add {k} to the simulation settings."
    assert simulation_settings[START_DATE] == pd.Timestamp(
        "2020-01-01 00:00:00"
    ), f"Function incorrectly parses the timestamp."
    assert simulation_settings[END_DATE] == pd.Timestamp(
        "2020-01-01 23:00:00"
    ), f"Function incorrectly parses the timestamp."
    assert (
        simulation_settings[PERIODS] == 24
    ), f"Function incorrectly identifies the number of evaluated periods."


PARSER = A0.mvs_arg_parser()


class TestTemporaryJsonFileDisposal:

    test_in_path = os.path.join(TEST_REPO_PATH, "inputs")
    test_out_path = os.path.join(TEST_REPO_PATH, "MVS_outputs")

    @mock.patch(
        "argparse.ArgumentParser.parse_args",
        return_value=PARSER.parse_args(
            [
                "-f",
                "-log",
                "warning",
                "-i",
                test_in_path,
def main(**kwargs):
    r"""
    Starts MVS tool simulations.

    Other Parameters
    ----------------
    overwrite : bool, optional
        Determines whether to replace existing results in `path_output_folder`
        with the results of the current simulation (True) or not (False).
        Default: False.
    pdf_report: bool, optional
        Whether to generate an automatic pdf report of the simulation's results (True) or not (False).
        Default: False.
    input_type : str, optional
        Defines whether the input is taken from the `mvs_config.json` file ("json")
        or from csv files ("csv") located within <path_input_folder>/csv_elements/.
        Default: "json".
    path_input_folder : str, optional
        The path to the directory where the input CSVs/JSON files are located.
        Default: "inputs/".
    path_output_folder : str, optional
        The path to the directory where the results of the simulation such as
        the plots, time series and results JSON files are saved by MVS E-Lands.
        Default: "MVS_outputs/".
    display_output : str, optional
        Sets the level of displayed logging messages.
        Options: "debug", "info", "warning", "error". Default: "info".
    lp_file_output : bool, optional
        Specifies whether the generated linear equation system is saved as an lp file.
        Default: False.
    """
    welcome_text = (
        "\n \n Multi-Vector Simulation Tool (MVS) V"
        + version_num
        + " "
        + "\n Version: "
        + version_date
        + " "
        + '\n Part of the toolbox of H2020 project "E-LAND", '
        + "Integrated multi-vector management system for Energy isLANDs"
        + "\n Coded at: Reiner Lemoine Institute (Berlin) "
        + "\n Contributors: Martha M. Hoffmann \n \n "
    )

    logging.debug("Accessing script: A0_initialization")
    user_input = A0.process_user_arguments(welcome_text=welcome_text, **kwargs)

    # Read all inputs
    # print("")
    # # todo: is user input completely used?
    # dict_values = data_input.load_json(user_input[PATH_INPUT_FILE ])

    move_copy_config_file = False
    if user_input[INPUT_TYPE] == CSV_EXT:
        logging.debug("Accessing script: A1_csv_to_json")
        move_copy_config_file = True
        A1.create_input_json(
            input_directory=os.path.join(user_input[PATH_INPUT_FOLDER], CSV_ELEMENTS)
        )

    logging.debug("Accessing script: B0_data_input_json")
    dict_values = B0.load_json(
        user_input[PATH_INPUT_FILE],
        path_input_folder=user_input[PATH_INPUT_FOLDER],
        path_output_folder=user_input[PATH_OUTPUT_FOLDER],
        move_copy=move_copy_config_file,
        set_default_values=True,
    )

    F0.store_as_json(
        dict_values,
        dict_values[SIMULATION_SETTINGS][PATH_OUTPUT_FOLDER_INPUTS],
        MVS_CONFIG,
    )

    print("")
    logging.debug("Accessing script: C0_data_processing")
    C0.all(dict_values)

    F0.store_as_json(
        dict_values,
        dict_values[SIMULATION_SETTINGS][PATH_OUTPUT_FOLDER],
        JSON_PROCESSED,
    )

    # Save the energy system graph only if a pdf report or png figures were requested
    if "path_pdf_report" in user_input or "path_png_figs" in user_input:
        save_energy_system_graph = True
    else:
        save_energy_system_graph = False

    print("")
    logging.debug("Accessing script: D0_modelling_and_optimization")
    results_meta, results_main = D0.run_oemof(
        dict_values,
        save_energy_system_graph=save_energy_system_graph,
    )

    print("")
    logging.debug("Accessing script: E0_evaluation")
    E0.evaluate_dict(dict_values, results_main, results_meta)

    logging.debug("Accessing script: F0_outputs")
    F0.evaluate_dict(
        dict_values,
        path_pdf_report=user_input.get("path_pdf_report", None),
        path_png_figs=user_input.get("path_png_figs", None),
    )
    return 1
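# A minimal usage sketch, assuming `main` is importable from the package's command line
# module (e.g. `from multi_vector_simulator.cli import main`; adjust the import to the
# actual package layout). The keyword arguments are the ones documented in the
# "Other Parameters" section above; the paths are illustrative only.
if __name__ == "__main__":
    main(
        overwrite=True,  # replace existing results in path_output_folder
        input_type="csv",  # read inputs from <path_input_folder>/csv_elements/
        path_input_folder="inputs/",
        path_output_folder="MVS_outputs/",
        display_output="warning",  # only log warnings and errors
        pdf_report=False,
        lp_file_output=False,
    )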
def report(pdf=None, path_simulation_output_json=None, path_pdf_report=None):
    """Display the report of a MVS simulation

    Command line use:

    .. code-block:: bash

        mvs_report [-h] [-i [PATH_SIM_OUTPUT]] [-o [REPORT_PATH]] [-pdf]

    optional command line arguments:
        -h, --help           show this help message and exit
        -pdf [PRINT_REPORT]  print the report as pdf (default: False)
        -i [OUTPUT_FOLDER]   path to the simulation result json file 'json_with_results.json'
        -o [REPORT_PATH]     path to save the pdf report
        -d [DEBUG_REPORT]    run the dash app in debug mode with hot-reload

    Parameters
    ----------
    pdf: bool
        if True a pdf report should be generated
        Default: False
    path_simulation_output_json: str
        path to the simulation result json file 'json_with_results.json'
    path_pdf_report: str
        path to save the pdf report

    Returns
    -------
    Saves a pdf report if the option -pdf is provided, otherwise displays the report as an app
    """
    # Parse the arguments from the command line
    parser = A0.report_arg_parser()
    args = vars(parser.parse_args())

    # User-provided kwargs have priority over the command line arguments;
    # the command line arguments in turn have priority over the default kwargs
    if pdf is None:
        pdf = args.get(ARG_PDF, False)
    if path_simulation_output_json is None:
        path_simulation_output_json = args.get(ARG_PATH_SIM_OUTPUT)
    if path_pdf_report is None:
        path_pdf_report = args.get(ARG_REPORT_PATH)

    # if the user only provided the path to a folder, complete it with the default json file name
    if os.path.isdir(path_simulation_output_json) is True:
        path_simulation_output_json = os.path.join(
            path_simulation_output_json, JSON_WITH_RESULTS + JSON_FILE_EXTENSION
        )
        logging.warning(
            f"Only path to a folder provided ({args.get(ARG_PATH_SIM_OUTPUT)}), looking now for "
            f"default {JSON_WITH_RESULTS + JSON_FILE_EXTENSION} file within this folder"
        )

    if os.path.exists(path_simulation_output_json) is False:
        raise FileNotFoundError(
            "Simulation results file {} not found. You need to run a simulation to generate "
            "the data before you can generate a report\n\n\tsee `mvs_tool -h` for help on how "
            "to run a simulation\n".format(path_simulation_output_json)
        )
    else:
        # path to the mvs simulation output files
        path_sim_output = os.path.dirname(path_simulation_output_json)

        # if the report path is not specified, it is placed within the mvs simulation outputs folder
        if path_pdf_report == "":
            path_pdf_report = os.path.join(path_sim_output, REPORT_FOLDER, PDF_REPORT)

        # load the results of a simulation
        dict_values = B0.load_json(
            path_simulation_output_json, flag_missing_values=False
        )
        test_app = create_app(dict_values, path_sim_output=path_sim_output)
        banner = "*" * 40
        print(banner + "\nPress ctrl+c to stop the report server\n" + banner)
        if pdf is True:
            print_pdf(test_app, path_pdf_report=path_pdf_report)
        else:
            if args.get(ARG_DEBUG_REPORT) is True:
                test_app.run_server(debug=True)
            else:
                # run the dash server for 600s before shutting it down
                open_in_browser(test_app, timeout=600)
                print(
                    banner
                    + "\nThe report server has timed out.\nTo start it again run "
                    "`mvs_report`.\nTo let it run for a longer time, change the timeout setting in "
                    "the cli.py file\n" + banner
                )
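# A minimal usage sketch, assuming `report` is importable from the same command line module
# as `main` above; the file and folder names are illustrative only. When called with explicit
# keyword arguments like this, the command line arguments (and their defaults) are only used
# for the parameters that are left as None.
if __name__ == "__main__":
    report(
        pdf=True,
        path_simulation_output_json="MVS_outputs/json_with_results.json",
        path_pdf_report="MVS_outputs/report/simulation_report.pdf",
    )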
def test_if_path_output_folder_exists_raise_fileexists_error(self, m_args):
    """The error message should advise the user to use the -f option to force overwrite"""
    if os.path.exists(self.test_out_path) is False:
        os.mkdir(self.test_out_path)
    with pytest.raises(FileExistsError):
        A0.process_user_arguments()
    PATHS_TO_PLOTS: {
        PLOTS_BUSSES: [],
        PLOTS_DEMANDS: [],
        PLOTS_RESOURCES: [],
        PLOTS_ES: [],
        PLOTS_PERFORMANCE: [],
        PLOTS_COSTS: [],
    }
}

SECTOR = "Electricity"
INTERVAL = 2
OUTPUT_PATH = os.path.join(TEST_REPO_PATH, "test_outputs")

PARSER = initializing.mvs_arg_parser()

TEST_INPUT_PATH = os.path.join(
    TEST_REPO_PATH, TEST_INPUT_DIRECTORY, "inputs_F1_plot_es_graph"
)
TEST_OUTPUT_PATH = os.path.join(TEST_REPO_PATH, "F1_outputs")

# Data for test_if_plot_of_all_energy_flows_for_all_sectors_are_stored_for_14_days
USER_INPUT = {PATH_OUTPUT_FOLDER: OUTPUT_PATH}
PROJECT_DATA = {PROJECT_NAME: "a_project", SCENARIO_NAME: "a_scenario"}

RESULTS_TIMESERIES = pd.read_csv(
    os.path.join(DUMMY_CSV_PATH, "plot_data_for_F1.csv"),
    sep=";",
    header=0,
    index_col=0,
def report(pdf=None, path_simulation_output_json=None, path_pdf_report=None):
    """Display the report of a MVS simulation

    Command line use:

    .. code-block:: bash

        mvs_report [-h] [-i [PATH_SIM_OUTPUT]] [-o [REPORT_PATH]] [-pdf]

    optional command line arguments:
        -h, --help           show this help message and exit
        -pdf [PRINT_REPORT]  print the report as pdf (default: False)
        -i [OUTPUT_FOLDER]   path to the simulation result json file 'json_with_results.json'
        -o [REPORT_PATH]     path to save the pdf report

    Parameters
    ----------
    pdf: bool
        if True a pdf report should be generated
        Default: False
    path_simulation_output_json: str
        path to the simulation result json file 'json_with_results.json'
    path_pdf_report: str
        path to save the pdf report

    Returns
    -------
    Saves a pdf report if the option -pdf is provided, otherwise displays the report as an app
    """
    # Parse the arguments from the command line
    parser = initializing.report_arg_parser()
    args = vars(parser.parse_args())
    print(args)

    # User-provided kwargs have priority over the command line arguments;
    # the command line arguments in turn have priority over the default kwargs
    if pdf is None:
        pdf = args.get(ARG_PDF, False)
    if path_simulation_output_json is None:
        path_simulation_output_json = args.get(ARG_PATH_SIM_OUTPUT)
    if path_pdf_report is None:
        path_pdf_report = args.get(ARG_REPORT_PATH)

    # if the user only provided the path to a folder, complete it with the default json file name
    if os.path.isdir(path_simulation_output_json) is True:
        path_simulation_output_json = os.path.join(
            path_simulation_output_json, JSON_WITH_RESULTS
        )

    if os.path.exists(path_simulation_output_json) is False:
        raise FileNotFoundError(
            "{} not found. You need to run a simulation to generate the data to report, "
            "see `python mvs_tool.py -h` for help".format(path_simulation_output_json)
        )
    else:
        # path to the mvs simulation output files
        path_sim_output = os.path.dirname(path_simulation_output_json)

        # if the report path is not specified, it is placed within the mvs simulation outputs folder
        if path_pdf_report == "":
            path_pdf_report = os.path.join(path_sim_output, REPORT_FOLDER, PDF_REPORT)

        # load the results of a simulation
        dict_values = data_input.load_json(path_simulation_output_json)
        test_app = create_app(dict_values, path_sim_output=path_sim_output)
        banner = "*" * 40
        print(banner + "\nPress ctrl+c to stop the report server\n" + banner)
        if pdf is True:
            print_pdf(test_app, path_pdf_report=path_pdf_report)
        else:
            # run the dash server for 600s before shutting it down
            open_in_browser(test_app, timeout=600)
            print(
                banner
                + "\nThe report server has timed out.\nTo start it again run `python "
                "mvs_report.py`.\nTo let it run for a longer time, change the timeout setting in "
                "the mvs_report.py file\n" + banner
            )
def test_if_png_pdf_not_active(self, m_args):
    user_inputs = A0.process_user_arguments()
    assert "path_png_figs" in user_inputs.keys()
    assert "path_pdf_report" not in user_inputs.keys()
def test_if_csv_opt_and_csv_elements_folder_not_in_input_folder_raise_filenotfound_error(
    self, m_args, tmpdir
):
    with pytest.raises(FileNotFoundError):
        A0.process_user_arguments(path_input_folder=tmpdir)
def test_if_path_output_folder_recursive_create_full_path(self, m_args):
    A0.process_user_arguments()
    assert os.path.exists(os.path.join(self.test_out_path, "recursive_folder"))
def test_if_json_opt_and_no_json_file_in_input_folder_raise_filenotfound_error(
    self, m_args, tmpdir
):
    """provide a temporary dir with no .json in it"""
    with pytest.raises(FileNotFoundError):
        A0.process_user_arguments(path_input_folder=tmpdir)
def test_if_pdf_opt_the_key_path_pdf_report_exists_in_user_inputs(self, m_args):
    user_inputs = A0.process_user_arguments()
    assert user_inputs["path_pdf_report"] == os.path.join(
        self.test_out_path, REPORT_FOLDER, PDF_REPORT
    )
    assert "path_png_figs" not in user_inputs.keys()
def test_input_folder_is_copied_in_output_within_folder_named_input(self, m_args):
    A0.process_user_arguments()
    assert os.path.exists(os.path.join(self.test_out_path, INPUTS_COPY))
class TestCommandLineInput:

    parser = A0.mvs_arg_parser()

    def test_input_folder(self):
        parsed = self.parser.parse_args(["-i", "input_folder"])
        assert parsed.path_input_folder == "input_folder"

    def test_default_input_folder(self):
        parsed = self.parser.parse_args([])
        assert parsed.path_input_folder == DEFAULT_INPUT_PATH

    def test_input_json_ext(self):
        parsed = self.parser.parse_args(["-ext", JSON_EXT])
        assert parsed.input_type == JSON_EXT

    def test_input_csv_ext(self):
        parsed = self.parser.parse_args(["-ext", CSV_EXT])
        assert parsed.input_type == CSV_EXT

    def test_default_input_type(self):
        parsed = self.parser.parse_args([])
        assert parsed.input_type == JSON_EXT

    def test_ext_not_accepting_other_choices(self):
        with pytest.raises(SystemExit) as argparse_error:
            parsed = self.parser.parse_args(["-ext", "something"])
        assert str(argparse_error.value) == "2"

    def test_output_folder(self):
        parsed = self.parser.parse_args(["-o", "output_folder"])
        assert parsed.path_output_folder == "output_folder"

    def test_default_output_folder(self):
        parsed = self.parser.parse_args([])
        assert parsed.path_output_folder == DEFAULT_OUTPUT_PATH

    def test_overwrite_false_by_default(self):
        parsed = self.parser.parse_args([])
        assert parsed.overwrite is False

    def test_overwrite_activation(self):
        parsed = self.parser.parse_args(["-f"])
        assert parsed.overwrite is True

    def test_pdf_report_false_by_default(self):
        parsed = self.parser.parse_args([])
        assert parsed.pdf_report is False

    def test_pdf_report_activation(self):
        parsed = self.parser.parse_args(["-pdf"])
        assert parsed.pdf_report is True

    def test_png_activation(self):
        parsed = self.parser.parse_args(["-png"])
        assert parsed.save_png is True

    def test_log_assignation(self):
        parsed = self.parser.parse_args(["-log", "debug"])
        assert parsed.display_output == "debug"

    def test_log_default(self):
        parsed = self.parser.parse_args([])
        assert parsed.display_output == "info"

    def test_log_not_accepting_other_choices(self):
        with pytest.raises(SystemExit) as argparse_error:
            parsed = self.parser.parse_args(["-log", "something"])
        assert str(argparse_error.value) == "2"

    # this ensures that the test is only run if explicitly executed,
    # i.e. not when the `pytest` command alone is called
    @pytest.mark.skipif(
        EXECUTE_TESTS_ON not in (TESTS_ON_MASTER),
        reason="Benchmark test deactivated, set env variable "
        "EXECUTE_TESTS_ON to 'master' to run this test",
    )
    @mock.patch(
        "argparse.ArgumentParser.parse_args",
        return_value=PARSER.parse_args(
            [
                "-f",
                "-log",
                "warning",
                "-i",
                os.path.join(TEST_REPO_PATH, INPUT_FOLDER),
                "-o",
                TEST_OUTPUT_PATH,
            ]
        ),
    )
    def test_user_defined_output_path(self, mock_args):
        main()
        assert os.path.exists(TEST_OUTPUT_PATH)

    def teardown_method(self):
        if os.path.exists(TEST_OUTPUT_PATH):
            shutil.rmtree(TEST_OUTPUT_PATH, ignore_errors=True)
def test_if_both_pdf_and_png_opt_raises_no_error(self, m_args):
    user_inputs = A0.process_user_arguments()
    assert "path_png_figs" in user_inputs.keys()
    assert "path_pdf_report" in user_inputs.keys()
def test_if_log_opt_display_output_is_set_with_correct_value(self, m_args):
    A0.process_user_arguments()
    # TODO check the logging level
    assert 1
def test_input_folder_not_existing_raise_filenotfound_error(self, m_args):
    with pytest.raises(FileNotFoundError):
        A0.process_user_arguments()
def test_if_no_pdf_opt_the_key_path_pdf_report_does_not_exist_in_user_inputs(
    self, m_args
):
    user_inputs = A0.process_user_arguments()
    assert "path_pdf_report" not in user_inputs.keys()