def test_submit_script(self):
    """Test the creation of a simple submission script."""
    from aiida.schedulers.datastructures import JobTemplate
    from aiida.common.datastructures import CodeInfo, CodeRunMode

    scheduler = SlurmScheduler()

    job_tmpl = JobTemplate()
    job_tmpl.shebang = '#!/bin/bash'
    job_tmpl.uuid = str(uuid.uuid4())
    job_tmpl.job_resource = scheduler.create_job_resource(num_machines=1, num_mpiprocs_per_machine=1)
    job_tmpl.max_wallclock_seconds = 24 * 3600
    code_info = CodeInfo()
    code_info.cmdline_params = ['mpirun', '-np', '23', 'pw.x', '-npool', '1']
    code_info.stdin_name = 'aiida.in'
    job_tmpl.codes_info = [code_info]
    job_tmpl.codes_run_mode = CodeRunMode.SERIAL

    submit_script_text = scheduler.get_submit_script(job_tmpl)

    assert submit_script_text.startswith('#!/bin/bash')
    assert '#SBATCH --no-requeue' in submit_script_text
    assert '#SBATCH --time=1-00:00:00' in submit_script_text
    assert '#SBATCH --nodes=1' in submit_script_text
    assert "'mpirun' '-np' '23' 'pw.x' '-npool' '1' < 'aiida.in'" in submit_script_text
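# The '#SBATCH --time=1-00:00:00' assertion above relies on SLURM's D-HH:MM:SS wall-time
# notation. The sketch below only illustrates that conversion for 24 * 3600 seconds;
# 'format_slurm_walltime' is a hypothetical local helper written for this illustration,
# not part of the SlurmScheduler API, which may format the field differently.
def test_walltime_formatting_sketch(self):
    """Sketch: why 24 * 3600 seconds corresponds to the '--time=1-00:00:00' string asserted above."""

    def format_slurm_walltime(seconds):
        """Render a number of seconds in SLURM's D-HH:MM:SS notation (illustrative only)."""
        days, remainder = divmod(seconds, 86400)
        hours, remainder = divmod(remainder, 3600)
        minutes, secs = divmod(remainder, 60)
        return f'{days}-{hours:02d}:{minutes:02d}:{secs:02d}'

    assert format_slurm_walltime(24 * 3600) == '1-00:00:00'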
def test_submit_script_with_num_cores_per_machine_and_mpiproc1(self):  # pylint: disable=invalid-name
    """
    Test that the script is generated correctly when consistent values are passed
    for both num_cores_per_machine and num_cores_per_mpiproc.

    They should pass the check:
    res.num_cores_per_mpiproc * res.num_mpiprocs_per_machine == res.num_cores_per_machine
    """
    from aiida.schedulers.datastructures import JobTemplate
    from aiida.common.datastructures import CodeInfo, CodeRunMode

    scheduler = SlurmScheduler()

    job_tmpl = JobTemplate()
    job_tmpl.shebang = '#!/bin/bash'
    job_tmpl.job_resource = scheduler.create_job_resource(
        num_machines=1, num_mpiprocs_per_machine=1, num_cores_per_machine=24, num_cores_per_mpiproc=24
    )
    job_tmpl.uuid = str(uuid.uuid4())
    job_tmpl.max_wallclock_seconds = 24 * 3600
    code_info = CodeInfo()
    code_info.cmdline_params = ['mpirun', '-np', '23', 'pw.x', '-npool', '1']
    code_info.stdin_name = 'aiida.in'
    job_tmpl.codes_info = [code_info]
    job_tmpl.codes_run_mode = CodeRunMode.SERIAL

    submit_script_text = scheduler.get_submit_script(job_tmpl)

    assert '#SBATCH --no-requeue' in submit_script_text
    assert '#SBATCH --time=1-00:00:00' in submit_script_text
    assert '#SBATCH --nodes=1' in submit_script_text
    assert '#SBATCH --ntasks-per-node=1' in submit_script_text
    assert '#SBATCH --cpus-per-task=24' in submit_script_text
    assert "'mpirun' '-np' '23' 'pw.x' '-npool' '1' < 'aiida.in'" in submit_script_text
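# Companion sketch to the test above: if the consistency check quoted in its docstring
# (num_cores_per_mpiproc * num_mpiprocs_per_machine == num_cores_per_machine) is violated,
# resource creation should be rejected. The exact exception type is an assumption here
# (ValueError below); adjust it to whatever the scheduler plugin actually raises.
def test_submit_script_with_num_cores_per_machine_and_mpiproc_mismatch_sketch(self):  # pylint: disable=invalid-name
    """Sketch: inconsistent num_cores_per_machine / num_cores_per_mpiproc values should be rejected."""
    scheduler = SlurmScheduler()

    with self.assertRaises(ValueError):  # assumed exception type, not confirmed by the tests above
        scheduler.create_job_resource(
            num_machines=1, num_mpiprocs_per_machine=1, num_cores_per_machine=24, num_cores_per_mpiproc=23
        )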
def test_submit_script_bad_shebang(self):
    """Test that the first line of the submit script is as expected."""
    from aiida.schedulers.datastructures import JobTemplate
    from aiida.common.datastructures import CodeInfo, CodeRunMode

    scheduler = SlurmScheduler()
    code_info = CodeInfo()
    code_info.cmdline_params = ['mpirun', '-np', '23', 'pw.x', '-npool', '1']
    code_info.stdin_name = 'aiida.in'

    for shebang, expected_first_line in ((None, '#!/bin/bash'), ('', ''), ('NOSET', '#!/bin/bash')):
        job_tmpl = JobTemplate()
        if shebang != 'NOSET':
            # For 'NOSET' the shebang is left unset on purpose, to check the default.
            job_tmpl.shebang = shebang
        job_tmpl.job_resource = scheduler.create_job_resource(num_machines=1, num_mpiprocs_per_machine=1)
        job_tmpl.codes_info = [code_info]
        job_tmpl.codes_run_mode = CodeRunMode.SERIAL

        submit_script_text = scheduler.get_submit_script(job_tmpl)

        # This tests whether the implementation correctly chooses the default shebang.
        self.assertEqual(submit_script_text.split('\n')[0], expected_first_line)