def test_submit_script_with_num_cores_per_mpiproc(self):
    """Test the submit script when only `num_cores_per_mpiproc` is specified.

    The PBS resource line must carry `ppn=24` while the mpiprocs count is
    left untouched by the per-process core setting.
    """
    from aiida.schedulers.datastructures import JobTemplate
    from aiida.common.datastructures import CodeInfo, CodeRunMode

    scheduler = PbsproScheduler()

    job_tmpl = JobTemplate()
    job_tmpl.shebang = '#!/bin/bash'
    job_tmpl.job_resource = scheduler.create_job_resource(
        num_machines=1, num_mpiprocs_per_machine=1, num_cores_per_mpiproc=24
    )
    job_tmpl.uuid = str(uuid.uuid4())
    job_tmpl.max_wallclock_seconds = 24 * 3600
    code_info = CodeInfo()
    code_info.cmdline_params = ['mpirun', '-np', '23', 'pw.x', '-npool', '1']
    code_info.stdin_name = 'aiida.in'
    job_tmpl.codes_info = [code_info]
    job_tmpl.codes_run_mode = CodeRunMode.SERIAL

    submit_script_text = scheduler.get_submit_script(job_tmpl)

    # `assertIn` yields a clearer failure message than `assertTrue(x in y)`
    self.assertIn('#PBS -r n', submit_script_text)
    self.assertTrue(submit_script_text.startswith('#!/bin/bash'))
    self.assertIn('#PBS -l select=1:mpiprocs=1:ppn=24', submit_script_text)
    # Note: `num_cores_per_mpiproc` should NOT override the mpiprocs value
    self.assertIn("'mpirun' '-np' '23' 'pw.x' '-npool' '1' < 'aiida.in'", submit_script_text)
def test_submit_script_bad_shebang(self):
    """Test that the default shebang is used when the template shebang is `None` or unset.

    An explicitly empty shebang must instead produce an empty first line.
    """
    from aiida.schedulers.datastructures import JobTemplate
    from aiida.common.datastructures import CodeInfo, CodeRunMode

    scheduler = PbsproScheduler()
    code_info = CodeInfo()
    code_info.cmdline_params = ['mpirun', '-np', '23', 'pw.x', '-npool', '1']
    code_info.stdin_name = 'aiida.in'

    # Pairs of (shebang to set, expected first line of the script);
    # the sentinel 'NOSET' means the attribute is left untouched.
    for shebang, expected_first_line in ((None, '#!/bin/bash'), ('', ''), ('NOSET', '#!/bin/bash')):
        job_tmpl = JobTemplate()
        if shebang != 'NOSET':
            job_tmpl.shebang = shebang
        job_tmpl.job_resource = scheduler.create_job_resource(num_machines=1, num_mpiprocs_per_machine=1)
        job_tmpl.codes_info = [code_info]
        job_tmpl.codes_run_mode = CodeRunMode.SERIAL

        submit_script_text = scheduler.get_submit_script(job_tmpl)

        # This tests if the implementation correctly chooses the default:
        self.assertEqual(submit_script_text.split('\n')[0], expected_first_line)
def test_submit_script(self):
    """Test that the submit script is generated correctly with default options."""
    from aiida.schedulers.datastructures import JobTemplate
    from aiida.common.datastructures import CodeInfo, CodeRunMode

    scheduler = PbsproScheduler()

    job_tmpl = JobTemplate()
    job_tmpl.shebang = '#!/bin/bash -l'
    job_tmpl.job_resource = scheduler.create_job_resource(num_machines=1, num_mpiprocs_per_machine=1)
    job_tmpl.uuid = str(uuid.uuid4())
    job_tmpl.max_wallclock_seconds = 24 * 3600
    code_info = CodeInfo()
    code_info.cmdline_params = ['mpirun', '-np', '23', 'pw.x', '-npool', '1']
    code_info.stdin_name = 'aiida.in'
    job_tmpl.codes_info = [code_info]
    job_tmpl.codes_run_mode = CodeRunMode.SERIAL

    submit_script_text = scheduler.get_submit_script(job_tmpl)

    # `assertIn` yields a clearer failure message than `assertTrue(x in y)`
    self.assertIn('#PBS -r n', submit_script_text)
    self.assertTrue(submit_script_text.startswith('#!/bin/bash -l'))
    self.assertIn('#PBS -l walltime=24:00:00', submit_script_text)
    self.assertIn('#PBS -l select=1', submit_script_text)
    self.assertIn("'mpirun' '-np' '23' 'pw.x' '-npool' '1' < 'aiida.in'", submit_script_text)
def test_parse_with_unexpected_newlines(self):
    """Test that `_parse_joblist_output` parses `qstat -f` output with unexpected newlines."""
    # pylint: disable=too-many-locals
    scheduler = PbsproScheduler()
    retval = 0
    stdout = text_qstat_f_to_test_with_unexpected_newlines
    stderr = ''

    job_list = scheduler._parse_joblist_output(retval, stdout, stderr)

    # The expected counts below are hard coded in the text being parsed
    job_on_cluster = 10
    job_parsed = len(job_list)
    self.assertEqual(job_parsed, job_on_cluster)

    job_running = 2
    job_running_parsed = len([j for j in job_list if j.job_state and j.job_state == JobState.RUNNING])
    self.assertEqual(job_running, job_running_parsed)

    job_held = 1
    job_held_parsed = len([j for j in job_list if j.job_state and j.job_state == JobState.QUEUED_HELD])
    self.assertEqual(job_held, job_held_parsed)

    job_queued = 5
    job_queued_parsed = len([j for j in job_list if j.job_state and j.job_state == JobState.QUEUED])
    self.assertEqual(job_queued, job_queued_parsed)

    running_users = ['somebody', 'user_556491']
    parsed_running_users = [j.job_owner for j in job_list if j.job_state and j.job_state == JobState.RUNNING]
    self.assertEqual(set(running_users), set(parsed_running_users))

    running_jobs = ['555716', '556491']
    parsed_running_jobs = [j.job_id for j in job_list if j.job_state and j.job_state == JobState.RUNNING]
    self.assertEqual(set(running_jobs), set(parsed_running_jobs))

    # Per-job machine/cpu totals must be consistent with the allocated machine list
    for job in job_list:
        if job.allocated_machines:
            num_machines = 0
            num_cpus = 0
            for machine in job.allocated_machines:
                num_machines += 1
                num_cpus += machine.num_cpus
            # `assertEqual` reports the mismatching values on failure, unlike `assertTrue(a == b)`
            self.assertEqual(job.num_machines, num_machines)
            self.assertEqual(job.num_cpus, num_cpus)
def test_parse_common_joblist_output(self):
    """Test that `_parse_joblist_output` parses a typical `qstat -f` output."""
    # pylint: disable=too-many-locals
    scheduler = PbsproScheduler()
    retval = 0
    stdout = text_qstat_f_to_test
    stderr = ''

    job_list = scheduler._parse_joblist_output(retval, stdout, stderr)

    # The expected counts below are hard coded in the text being parsed
    job_on_cluster = 6
    job_parsed = len(job_list)
    self.assertEqual(job_parsed, job_on_cluster)

    job_running = 2
    job_running_parsed = len([j for j in job_list if j.job_state and j.job_state == JobState.RUNNING])
    self.assertEqual(job_running, job_running_parsed)

    job_held = 2
    job_held_parsed = len([j for j in job_list if j.job_state and j.job_state == JobState.QUEUED_HELD])
    self.assertEqual(job_held, job_held_parsed)

    job_queued = 2
    job_queued_parsed = len([j for j in job_list if j.job_state and j.job_state == JobState.QUEUED])
    self.assertEqual(job_queued, job_queued_parsed)

    running_users = ['user02', 'user3']
    parsed_running_users = [j.job_owner for j in job_list if j.job_state and j.job_state == JobState.RUNNING]
    self.assertEqual(set(running_users), set(parsed_running_users))

    running_jobs = ['69301.mycluster', '74164.mycluster']
    parsed_running_jobs = [j.job_id for j in job_list if j.job_state and j.job_state == JobState.RUNNING]
    self.assertEqual(set(running_jobs), set(parsed_running_jobs))

    # Per-job machine/cpu totals must be consistent with the allocated machine list
    for job in job_list:
        if job.allocated_machines:
            num_machines = 0
            num_cpus = 0
            for machine in job.allocated_machines:
                num_machines += 1
                num_cpus += machine.num_cpus
            # `assertEqual` reports the mismatching values on failure, unlike `assertTrue(a == b)`
            self.assertEqual(job.num_machines, num_machines)
            self.assertEqual(job.num_cpus, num_cpus)
def test_submit_script_with_num_cores_per_machine_and_mpiproc2(self):
    """Test that creating a job resource fails for inconsistent core counts.

    The call must raise because the invariant
    ``num_cores_per_mpiproc * num_mpiprocs_per_machine == num_cores_per_machine``
    is violated by the values passed here (1 * 23 != 24).
    """
    from aiida.schedulers.datastructures import JobTemplate

    scheduler = PbsproScheduler()
    job_tmpl = JobTemplate()

    # The scheduler's internal consistency check must reject these values
    with self.assertRaises(ValueError):
        job_tmpl.job_resource = scheduler.create_job_resource(
            num_machines=1,
            num_mpiprocs_per_machine=1,
            num_cores_per_machine=24,
            num_cores_per_mpiproc=23,
        )