def test_queue_name(scheduler_and_fs: Tuple[Scheduler, FileSystem]) -> None:
    """Check that a job submitted with an explicit queue lands on it.

    Submits a job that echoes the scheduler's queue environment variable
    and asserts the requested queue name appears in the captured stdout.
    """
    # NOTE(review): the annotation says a 2-tuple, but three values are
    # unpacked here — the fixture apparently yields a third item; confirm
    # and update the annotation at the fixture's definition.
    sched, fs, _ = scheduler_and_fs

    # The direct GNU scheduler has no concept of queues, so there is
    # nothing to test for it.
    if isinstance(sched, DirectGnuScheduler):
        return

    desc = JobDescription()
    desc.working_directory = '/home/cerulean'
    desc.command = 'echo'
    # Both Slurm and Torque variables are echoed; only the active
    # scheduler's one expands to the queue name.
    desc.arguments = ['$SLURM_JOB_PARTITION', '$PBS_QUEUE']
    desc.queue_name = 'batch'
    desc.stdout_file = '/home/cerulean/test_queue_name.out'

    job_id = sched.submit(desc)
    print('Job id: {}'.format(job_id))

    # Poll until the job finishes.
    while sched.get_status(job_id) != JobStatus.DONE:
        time.sleep(10.0)

    assert sched.get_exit_code(job_id) == 0

    output_path = fs / 'home/cerulean/test_queue_name.out'
    assert 'batch' in output_path.read_text()
    output_path.unlink()
def test_num_nodes(scheduler_and_fs: Tuple[Scheduler, FileSystem]) -> None:
    """Check that a multi-node job actually gets the requested node count.

    Submits a 2-node job whose output reports the number of allocated
    nodes (scheduler-specific command), then asserts '2' appears in it.
    """
    # NOTE(review): the annotation says a 2-tuple, but three values are
    # unpacked here — the fixture apparently yields a third item; confirm
    # and update the annotation at the fixture's definition.
    sched, fs, _ = scheduler_and_fs

    # The direct GNU scheduler runs everything on one node and ignores
    # the num_nodes attribute, so this test does not apply to it.
    if isinstance(sched, DirectGnuScheduler):
        return

    job_desc = JobDescription()
    job_desc.working_directory = '/home/cerulean'
    job_desc.num_nodes = 2
    if isinstance(sched, TorqueScheduler):
        # Torque lists one line per allocated node in $PBS_NODEFILE.
        job_desc.command = 'wc'
        job_desc.arguments = ['-l', '$PBS_NODEFILE']
    elif isinstance(sched, SlurmScheduler):
        # Slurm exposes the node count directly.
        job_desc.command = 'echo'
        job_desc.arguments = ['$SLURM_JOB_NUM_NODES']
    job_desc.queue_name = 'batch'
    job_desc.stdout_file = '/home/cerulean/test_num_nodes.out'

    job_id = sched.submit(job_desc)
    while sched.get_status(job_id) != JobStatus.DONE:
        time.sleep(10.0)

    outfile = fs / 'home/cerulean/test_num_nodes.out'
    # Fix: read the output once and assert on the stored value; the
    # original read the file a second time and left this variable unused.
    num_nodes_output = outfile.read_text()
    assert '2' in num_nodes_output
    outfile.unlink()
def test_job_script_queue_name() -> None:
    """Check that queue_name is rendered as a Slurm partition directive.

    Only verifies the generated job-script text; whether the option
    actually works end-to-end is covered by the scheduler tests.
    """
    description = JobDescription()
    description.queue_name = 'testing_queue'
    rendered = _job_desc_to_job_script(description)
    assert '#SBATCH --partition=testing_queue' in rendered