def full_bout_runner_tester(
    submitter_type: Type[AbstractSubmitter],
    make_project: Path,
    yield_number_of_rows_for_all_tables: Callable[[DatabaseReader], Dict[str, int]],
    file_state_restorer: FileStateRestorer,
) -> None:
    """
    Check that the BoutRunner can carry out a run end to end.

    Verifies that:

    1. A run can be executed
    2. The metadata is properly stored

    Parameters
    ----------
    submitter_type : type
        Submitter type to check for
    make_project : Path
        The path to the conduction example
    yield_number_of_rows_for_all_tables : function
        Function which returns the number of rows for all tables in a schema
    file_state_restorer : FileStateRestorer
        Object for restoring files to original state
    """
    run_name = "test_bout_runner_integration"
    run_group = make_run_group(
        {"name": run_name, "run_graph": None, "waiting_for": None},
        make_project,
        file_state_restorer,
    )

    # Execute the run and block until it has finished
    bout_runner = BoutRunner(run_group.run_graph)
    bout_runner.run()
    bout_runner.wait_until_completed()

    # The bout run node must have been submitted with the expected submitter
    run_node = bout_runner.run_graph["bout_run_test_bout_runner_integration"]
    assert isinstance(run_node["submitter"], submitter_type)

    # Verify that the run succeeded...
    db_reader = assert_first_run(run_group.bout_paths, run_group.db_connector)
    # ...and that exactly one run has been registered in every table
    assert_tables_have_expected_len(
        db_reader, yield_number_of_rows_for_all_tables, expected_run_number=1
    )
def test_full_bout_runner(
    make_project: Path,
    yield_number_of_rows_for_all_tables: Callable[[DatabaseReader], Dict[str, int]],
    clean_default_db_dir: Path,
) -> None:
    """
    Check that the BoutRunner can carry out a run end to end.

    Verifies that:

    1. A run can be executed
    2. The metadata is properly stored

    Parameters
    ----------
    make_project : Path
        The path to the conduction example
    yield_number_of_rows_for_all_tables : function
        Function which returns the number of rows for all tables in a schema
    clean_default_db_dir : Path
        Path to the default database directory
    """
    # The fixture is requested only for its clean-up side effect
    _ = clean_default_db_dir

    run_name = "test_bout_runner_integration"
    run_group = make_run_group(run_name, make_project)

    # Execute the run
    bout_runner = BoutRunner(run_group.run_graph)
    bout_runner.run()

    # Verify that the run succeeded...
    db_reader = assert_first_run(run_group.bout_paths, run_group.db_connector)
    # ...and that exactly one run has been registered in every table
    assert_tables_have_expected_len(
        db_reader, yield_number_of_rows_for_all_tables, expected_run_number=1
    )
def bout_runner_from_path_tester(
    submitter_type: Type[AbstractSubmitter],
    project_path: Path,
    yield_number_of_rows_for_all_tables: Callable[[DatabaseReader], Dict[str, int]],
    file_state_restorer: FileStateRestorer,
) -> None:
    """
    Test that the minimal BoutRunners setup works.

    This test will test that:
    1. We can execute a run from the (mocked) current work directory
    2. The correct submitter has been used
    3. The metadata is properly stored
    4. We cannot execute the run again...
    5. ...unless we set force=True
    6. Check the restart functionality twice

    Parameters
    ----------
    submitter_type : type
        Submitter type to check for
    project_path : Path
        The path to the conduction example
    yield_number_of_rows_for_all_tables : function
        Function which returns the number of rows for all tables in a schema
    file_state_restorer : FileStateRestorer
        Object for restoring files to original state
    """
    # NOTE: This triggers too-many-statements (51/50)
    # pylint: disable=too-many-statements
    logging.info("Start: First run")
    # Make project to save time
    _ = project_path
    # Register the artefacts this test creates so they are cleaned up afterwards
    file_state_restorer.add(project_path.joinpath("conduction.db"))
    file_state_restorer.add(project_path.joinpath("settings_run"))
    # BoutRunner() with no arguments picks up the project from the current
    # working directory, hence the change_directory context
    with change_directory(project_path):
        runner = BoutRunner()
        bout_run_setup = runner.run_graph["bout_run_0"]["bout_run_setup"]
        file_state_restorer.add(
            bout_run_setup.bout_paths.bout_inp_dst_dir, force_mark_removal=True
        )
    runner.run()
    runner.wait_until_completed()
    # The run must have been submitted with the expected submitter type
    assert isinstance(bout_run_setup.executor.submitter, submitter_type)
    bout_paths = bout_run_setup.bout_paths
    db_connector = bout_run_setup.db_connector
    # Assert that the run went well
    db_reader = assert_first_run(bout_paths, db_connector)
    # Assert that the number of runs is 1
    assert_tables_have_expected_len(
        db_reader, yield_number_of_rows_for_all_tables, expected_run_number=1
    )
    logging.info("Done: First run")

    logging.info("Start: Check RuntimeError")
    # Check that all the nodes have changed status: re-running the same graph
    # without resetting it is expected to raise
    with pytest.raises(RuntimeError):
        runner.run()
        runner.wait_until_completed()
    logging.info("Done: Check RuntimeError")

    logging.info("Start: Assert that run will not be run again")
    # Check that the run will not be executed again
    runner.reset()
    runner.run()
    runner.wait_until_completed()
    # Assert that the number of runs is still 1 (the run was skipped)
    assert_tables_have_expected_len(
        db_reader, yield_number_of_rows_for_all_tables, expected_run_number=1
    )
    logging.info("Done: Assert that run will not be run again")

    logging.info("Start: Run with force=True")
    # Check that force overrides the behaviour
    runner.run(force=True)
    runner.wait_until_completed()
    assert_tables_have_expected_len(
        db_reader, yield_number_of_rows_for_all_tables, expected_run_number=2
    )
    logging.info("Done: Run with force=True")

    logging.info("Start: Run with restart_all=True the first time")
    dump_dir_parent = bout_paths.bout_inp_dst_dir.parent
    dump_dir_name = bout_paths.bout_inp_dst_dir.name
    file_state_restorer.add(dump_dir_parent.joinpath(f"{dump_dir_name}_restart_0"))
    # Check that the restart functionality works
    runner.run(restart_all=True)
    runner.wait_until_completed()
    assert_tables_have_expected_len(
        db_reader,
        yield_number_of_rows_for_all_tables,
        expected_run_number=3,
        restarted=True,
    )
    # NOTE: The test in tests.unit.bout_runners.runners.test_bout_runner is testing
    #       restart_from_bout_inp_dst=True, whereas this is testing restart_all=True
    assert_dump_files_exist(dump_dir_parent.joinpath(f"{dump_dir_name}_restart_0"))
    logging.info("Done: Run with restart_all=True the first time")

    logging.info("Start: Run with restart_all=True the second time")
    # ...twice
    file_state_restorer.add(dump_dir_parent.joinpath(f"{dump_dir_name}_restart_1"))
    runner.run(restart_all=True)
    runner.wait_until_completed()
    assert_tables_have_expected_len(
        db_reader,
        yield_number_of_rows_for_all_tables,
        expected_run_number=4,
        restarted=True,
    )
    # NOTE: The test in tests.unit.bout_runners.runners.test_bout_runner is testing
    #       restart_from_bout_inp_dst=True, whereas this is testing restart_all=True
    assert_dump_files_exist(dump_dir_parent.joinpath(f"{dump_dir_name}_restart_1"))
    # The second restart continues from the first, so the final simulated time
    # in the log should have reached 30.0
    simulation_steps = LogReader(
        dump_dir_parent.joinpath(f"{dump_dir_name}_restart_1", "BOUT.log.0")
    ).get_simulation_steps()
    assert np.isclose(
        simulation_steps.loc[simulation_steps.index[-1], "Sim_time"], 30.0
    )
    logging.info("Done: Run with restart_all=True the second time")
def test_bout_runners_from_directory(
    make_project: Path,
    yield_number_of_rows_for_all_tables: Callable[[DatabaseReader], Dict[str, int]],
    clean_default_db_dir: Path,
    tear_down_restart_directories: Callable[[Path], None],
) -> None:
    """
    Test that the minimal BoutRunners setup works.

    This test will test that:
    1. We can execute a run from the (mocked) current work directory
    2. The metadata is properly stored
    3. We cannot execute the run again...
    4. ...unless we set force=True
    5. Check the restart functionality twice

    Parameters
    ----------
    make_project : Path
        The path to the conduction example
    yield_number_of_rows_for_all_tables : function
        Function which returns the number of rows for all tables in a schema
    clean_default_db_dir : Path
        Path to the default database directory
    tear_down_restart_directories : function
        Function used for removal of restart directories
    """
    # For automatic clean-up
    _ = clean_default_db_dir
    # Make project to save time
    project_path = make_project
    # BoutRunner() with no arguments picks up the project from the current
    # working directory, hence the change_directory context
    with change_directory(project_path):
        runner = BoutRunner()
        bout_run_setup = runner.run_graph["bout_run_0"]["bout_run_setup"]
        runner.run()
    bout_paths = bout_run_setup.bout_paths
    # Register the restart directories for removal at tear-down
    tear_down_restart_directories(bout_run_setup.bout_paths.bout_inp_dst_dir)
    db_connector = bout_run_setup.db_connector
    # Assert that the run went well
    db_reader = assert_first_run(bout_paths, db_connector)
    # Assert that the number of runs is 1
    assert_tables_have_expected_len(
        db_reader, yield_number_of_rows_for_all_tables, expected_run_number=1
    )
    # Check that all the nodes have changed status: re-running the same graph
    # without resetting it is expected to raise
    with pytest.raises(RuntimeError):
        runner.run()
    # Check that the run will not be executed again
    runner.reset()
    runner.run()
    # Assert that the number of runs is still 1 (the run was skipped)
    assert_tables_have_expected_len(
        db_reader, yield_number_of_rows_for_all_tables, expected_run_number=1
    )
    # Check that force overrides the behaviour
    runner.run(force=True)
    assert_tables_have_expected_len(
        db_reader, yield_number_of_rows_for_all_tables, expected_run_number=2
    )

    dump_dir_parent = bout_paths.bout_inp_dst_dir.parent
    dump_dir_name = bout_paths.bout_inp_dst_dir.name

    # Check that the restart functionality works
    runner.run(restart_all=True)
    expected_run_number = 3
    assert_tables_have_expected_len(
        db_reader,
        yield_number_of_rows_for_all_tables,
        expected_run_number=expected_run_number,
        restarted=True,
    )
    # NOTE: The test in tests.unit.bout_runners.runners.test_bout_runner is testing
    #       restart_from_bout_inp_dst=True, whereas this is testing restart_all=True
    assert_dump_files_exist(dump_dir_parent.joinpath(f"{dump_dir_name}_restart_0"))

    # ...twice
    runner.run(restart_all=True)
    expected_run_number = 4
    assert_tables_have_expected_len(
        db_reader,
        yield_number_of_rows_for_all_tables,
        expected_run_number=expected_run_number,
        restarted=True,
    )
    # NOTE: The test in tests.unit.bout_runners.runners.test_bout_runner is testing
    #       restart_from_bout_inp_dst=True, whereas this is testing restart_all=True
    assert_dump_files_exist(dump_dir_parent.joinpath(f"{dump_dir_name}_restart_1"))
def test_run_bout_run(
    make_project: Path,
    clean_default_db_dir: Path,
    get_bout_run_setup: Callable[[str], BoutRunSetup],
    yield_number_of_rows_for_all_tables: Callable[[DatabaseReader], Dict[str, int]],
    tear_down_restart_directories: Callable[[Path], None],
) -> None:
    """
    Test the BOUT++ run method.

    Parameters
    ----------
    make_project : Path
        The path to the conduction example
    clean_default_db_dir : Path
        Path to the default database dir
    get_bout_run_setup : function
        Function which returns the BoutRunSetup object based on the conduction
        directory
    yield_number_of_rows_for_all_tables : function
        Function which returns the number of rows for all tables in a schema
    tear_down_restart_directories : function
        Function used for removal of restart directories
    """
    # For automatic clean-up
    _ = clean_default_db_dir
    # Make project to save time
    _ = make_project
    run_graph = RunGraph()
    runner = BoutRunner(run_graph)
    bout_run_setup = get_bout_run_setup("test_run_bout_run")
    # Register the restart directories for removal at tear-down
    tear_down_restart_directories(bout_run_setup.bout_paths.bout_inp_dst_dir)
    bout_paths = bout_run_setup.bout_paths
    db_connector = bout_run_setup.db_connector
    # Run once
    submitter = runner.run_bout_run(bout_run_setup)
    # NOTE(review): run_bout_run appears to return None when the run is
    #               skipped, hence the guard — confirm against the runner API
    if submitter is not None:
        submitter.wait_until_completed()
    # Assert that the run went well
    database_reader = assert_first_run(bout_paths, db_connector)
    # Assert that the number of runs is 1
    assert_tables_have_expected_len(
        database_reader, yield_number_of_rows_for_all_tables, expected_run_number=1
    )
    # Check that the run will not be executed again
    assert runner.run_bout_run(bout_run_setup) is None
    # Assert that the number of runs is still 1 (the run was skipped)
    assert_tables_have_expected_len(
        database_reader, yield_number_of_rows_for_all_tables, expected_run_number=1
    )
    # Check that force overrides the behaviour
    submitter = runner.run_bout_run(bout_run_setup, force=True)
    if submitter is not None:
        submitter.wait_until_completed()
    assert_tables_have_expected_len(
        database_reader, yield_number_of_rows_for_all_tables, expected_run_number=2
    )

    dump_dir_parent = bout_paths.bout_inp_dst_dir.parent
    dump_dir_name = bout_paths.bout_inp_dst_dir.name

    # Check that restart makes another entry
    submitter = runner.run_bout_run(bout_run_setup, restart_from_bout_inp_dst=True)
    if submitter is not None:
        submitter.wait_until_completed()
    assert_tables_have_expected_len(
        database_reader,
        yield_number_of_rows_for_all_tables,
        expected_run_number=3,
        restarted=True,
    )
    # NOTE: The test in tests.unit.bout_runners.runner.test_bout_runner is testing
    #       restart_all=True, whereas this is testing restart_from_bout_inp_dst=True
    assert_dump_files_exist(dump_dir_parent.joinpath(f"{dump_dir_name}_restart_0"))

    # ...and yet another entry
    submitter = runner.run_bout_run(bout_run_setup, restart_from_bout_inp_dst=True)
    if submitter is not None:
        submitter.wait_until_completed()
    assert_tables_have_expected_len(
        database_reader,
        yield_number_of_rows_for_all_tables,
        expected_run_number=4,
        restarted=True,
    )
    # NOTE: The test in tests.unit.bout_runners.runner.test_bout_runner is testing
    #       restart_all=True, whereas this is testing restart_from_bout_inp_dst=True
    assert_dump_files_exist(dump_dir_parent.joinpath(f"{dump_dir_name}_restart_1"))
# NOTE(review): this function has the same name as the test defined earlier in
#               this file; if both live in one module the later definition
#               shadows the earlier one, and pytest collects only this one —
#               verify whether the two versions were meant to coexist
def test_run_bout_run(
    make_project: Path,
    get_bout_run_setup: Callable[[str], BoutRunSetup],
    yield_number_of_rows_for_all_tables: Callable[[DatabaseReader], Dict[str, int]],
    file_state_restorer: FileStateRestorer,
) -> None:
    """
    Test the BOUT++ run method.

    Parameters
    ----------
    make_project : Path
        The path to the conduction example
    get_bout_run_setup : function
        Function which returns the BoutRunSetup object based on the conduction
        directory
    yield_number_of_rows_for_all_tables : function
        Function which returns the number of rows for all tables in a schema
    file_state_restorer : FileStateRestorer
        Object for restoring files to original state
    """
    # Make project to save time
    _ = make_project
    run_graph = RunGraph()
    runner = BoutRunner(run_graph)
    bout_run_setup = get_bout_run_setup("test_run_bout_run")
    bout_paths = bout_run_setup.bout_paths
    db_connector = bout_run_setup.db_connector
    # NOTE: bout_run_setup.bout_paths.bout_inp_dst_dir will be removed in the
    #       yield_bout_path_conduction fixture (through the get_bout_run_setup
    #       fixture)
    #       Hence we do not need to add bout_run_setup.bout_paths.bout_inp_dst_dir
    #       to the file_state_restorer
    file_state_restorer.add(db_connector.db_path, force_mark_removal=True)
    # Run once
    submitter = bout_run_setup.submitter
    # run_bout_run returns a falsy value when the run was skipped, so only
    # wait when a run was actually submitted
    if runner.run_bout_run(bout_run_setup):
        submitter.wait_until_completed()
    # Assert that the run went well
    database_reader = assert_first_run(bout_paths, db_connector)
    # Assert that the number of runs is 1
    assert_tables_have_expected_len(
        database_reader, yield_number_of_rows_for_all_tables, expected_run_number=1
    )
    # Check that the run will not be executed again
    assert not runner.run_bout_run(bout_run_setup)
    # Assert that the number of runs is still 1 (the run was skipped)
    assert_tables_have_expected_len(
        database_reader, yield_number_of_rows_for_all_tables, expected_run_number=1
    )
    # Check that force overrides the behaviour
    if runner.run_bout_run(bout_run_setup, force=True):
        submitter.wait_until_completed()
    assert_tables_have_expected_len(
        database_reader, yield_number_of_rows_for_all_tables, expected_run_number=2
    )

    dump_dir_parent = bout_paths.bout_inp_dst_dir.parent
    dump_dir_name = bout_paths.bout_inp_dst_dir.name

    # Check that restart makes another entry
    bout_run_setup.executor.restart_from = bout_run_setup.bout_paths.bout_inp_dst_dir
    copy_restart_files(
        bout_run_setup.executor.restart_from, bout_run_setup.bout_paths.bout_inp_dst_dir
    )
    if runner.run_bout_run(bout_run_setup):
        submitter.wait_until_completed()
    expected_run_number = 3
    assert_tables_have_expected_len(
        database_reader,
        yield_number_of_rows_for_all_tables,
        expected_run_number=expected_run_number,
        restarted=True,
    )
    # NOTE: The test in tests.unit.bout_runners.runner.test_bout_runner is testing
    #       restart_all=True, whereas this is testing restart_from_bout_inp_dst=True
    assert_dump_files_exist(dump_dir_parent.joinpath(f"{dump_dir_name}_restart_0"))
    file_state_restorer.add(
        dump_dir_parent.joinpath(f"{dump_dir_name}_restart_0"), force_mark_removal=True
    )

    # ...and yet another entry
    bout_run_setup.executor.restart_from = bout_run_setup.bout_paths.bout_inp_dst_dir
    copy_restart_files(
        bout_run_setup.executor.restart_from, bout_run_setup.bout_paths.bout_inp_dst_dir
    )
    if runner.run_bout_run(bout_run_setup):
        submitter.wait_until_completed()
    assert_tables_have_expected_len(
        database_reader,
        yield_number_of_rows_for_all_tables,
        expected_run_number=expected_run_number + 1,
        restarted=True,
    )
    # NOTE: The test in tests.unit.bout_runners.runner.test_bout_runner is testing
    #       restart_all=True, whereas this is testing restart_from_bout_inp_dst=True
    assert_dump_files_exist(dump_dir_parent.joinpath(f"{dump_dir_name}_restart_1"))
    file_state_restorer.add(
        dump_dir_parent.joinpath(f"{dump_dir_name}_restart_1"), force_mark_removal=True
    )