def run_simulation_with_PJ(self, sh_jobs_scripts):
    """Run simulations from the SWEEP directory using QCG-PilotJob (PJ)."""
    from qcg.pilotjob.api.job import Jobs

    jobs = Jobs()
    for sh_job_scripts in sh_jobs_scripts:
        sweep_dir_name = os.path.basename(os.path.dirname(sh_job_scripts))
        jobs.add(
            name="SWEEP_{}".format(sweep_dir_name),
            exec="bash",
            args=["-l", sh_job_scripts],
            stdout="{}/{}.stdout".format(os.path.dirname(sh_job_scripts), "${jname}__${uniq}"),
            stderr="{}/{}.stderr".format(os.path.dirname(sh_job_scripts), "${jname}__${uniq}"),
            numCores={"exact": self.cores},
            model="default")

        print("\nAdd job with :")
        print("name=SWEEP_{}".format(sweep_dir_name))
        print("args = [-l,{}]".format(sh_job_scripts))
        print("stdout = {}/{}.stdout".format(
            os.path.dirname(sh_job_scripts), "${jname}__${uniq}"))
        print("stderr = {}/{}.stderr".format(
            os.path.dirname(sh_job_scripts), "${jname}__${uniq}"))
        print("numCores=exact: {}".format(self.cores))

    ids = QCG_MANAGER.submit(jobs)
    # wait until the submitted jobs finish
    QCG_MANAGER.wait4(ids)
    print("\nAll new SWEEP dirs are finished...\n")
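# A minimal usage sketch for run_simulation_with_PJ(); the SWEEP layout and the
# 'campaign' object name below are hypothetical and only illustrate that every entry
# of sh_jobs_scripts is expected to be the path to a shell script inside its own
# SWEEP sub-directory (the method itself also relies on a global QCG_MANAGER).
import glob
import os

sweep_root = os.path.join(os.getcwd(), "SWEEP")
sh_jobs_scripts = sorted(glob.glob(os.path.join(sweep_root, "*", "job.sh")))

# 'campaign' stands for whatever object defines run_simulation_with_PJ() and
# carries the 'cores' attribute used for each task:
# campaign.run_simulation_with_PJ(sh_jobs_scripts)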
def submit(self, fn: Callable[..., Union[str, Tuple[str, Dict[str, Any]]]], *args, **kwargs):
    """Submits a specific task to the QCG-PJ manager using a template-based,
    executor-like interface.

    Parameters
    ----------
    fn : Callable
        A callable that returns a task's template, either as a plain string or as
        a tuple. If a tuple is returned, its first element should be a string
        containing a QCG-PilotJob task's description with placeholders
        (identifiers preceded by the $ symbol) and the second a dictionary that
        assigns default values to selected placeholders.
    *args : variable length list of dicts, optional
        A set of dicts containing parameters that will be used to substitute
        placeholders defined in the template.
        Note: *args overwrite defaults, but are overwritten by **kwargs.
    **kwargs : arbitrary keyword arguments
        A set of keyword arguments that will be used to substitute placeholders
        defined in the template.
        Note: **kwargs overwrite *args and defaults.

    Returns
    -------
    QCGPJFuture
        The QCGPJFuture object assigned to the submitted task.
    """
    template = fn()
    if isinstance(template, tuple):
        template_str = template[0]
        defaults = template[1]
    else:
        template_str = template
        defaults = {}

    t = Template(textwrap.dedent(template_str))

    substitutions = {}
    for a in args:
        if a is not None:
            substitutions.update(a)
    substitutions.update(kwargs)

    td_str = t.substitute(defaults, **substitutions)
    td = ast.literal_eval(td_str)

    if 'env' not in td['execution']:
        td['execution']['env'] = {}
    td['execution']['env']['QCG_PM_EXEC_API_JOB_ID'] = '${jname}'

    jobs = Jobs()
    jobs.add_std(td)
    jobs_ids = self._qcgpjm.submit(jobs)
    return QCGPJFuture(jobs_ids, self._qcgpjm)
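# A sketch (not part of the original API) of the kind of callable that can be passed
# to the template-based submit() above. The task description and placeholder names
# ('name', 'message', 'cores') are hypothetical; the substituted text only has to
# evaluate (via ast.literal_eval) to a dict accepted by Jobs().add_std().
def echo_template():
    template = """
    {
        'name': '${name}',
        'execution': {
            'exec': '/bin/echo',
            'args': ['${message}'],
            'stdout': '${name}.out'
        },
        'resources': {'numCores': {'exact': ${cores}}}
    }
    """
    defaults = {'cores': 1}
    return template, defaults

# Positional dicts override the defaults, and keyword arguments override both,
# e.g. (assuming 'executor' is an object exposing this submit() method):
# future = executor.submit(echo_template, {'name': 'echo1'}, message='hello')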
def test_slurmenv_api_iteration_simple():
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()
    set_pythonpath_to_qcg_module()

    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    try:
        m = LocalManager(['--log', 'debug', '--wd', tmpdir, '--report-format', 'json'],
                         {'wdir': str(tmpdir)})

        its = 2
        jobs = Jobs().add_std({
            'name': 'host',
            'iteration': {'stop': its},
            'execution': {'exec': 'hostname', 'args': ['--fqdn'], 'stdout': 'out'},
            'resources': {'numCores': {'exact': 1}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED')
        assert jinfos
        jinfo = jinfos['host']
        print('jinfo: {}'.format(jinfo))

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0))

        its = 2
        jobs = Jobs().add_std({
            'name': 'host2',
            'iteration': {'stop': its},
            'execution': {'exec': 'hostname', 'args': ['--fqdn'], 'stdout': 'out'},
            'resources': {'numCores': {'exact': 1}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED', withChilds=True)
        assert jinfos
        jinfo = jinfos['host2']
        print('jinfo: {}'.format(jinfo))

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0))

        assert len(jinfo.childs) == its
        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format('host2', iteration),
                        job_it.wdir == tmpdir,
                        job_it.total_cores == 1))
    finally:
        if m:
            m.finish()
            # m.stopManager()
            m.cleanup()
        rmtree(tmpdir)
def test_slurmenv_api_submit_exceed_total_cores():
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()
    set_pythonpath_to_qcg_module()

    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    try:
        m = LocalManager(['--log', 'debug', '--wd', tmpdir, '--report-format', 'json'],
                         {'wdir': str(tmpdir)})

        jobs = Jobs().add_std({
            'name': 'date',
            'execution': {'exec': '/bin/date'},
            'resources': {'numCores': {'exact': resources.total_cores + 1}}
        })
        with pytest.raises(ConnectionError, match=r".*Not enough resources.*"):
            m.submit(jobs)
        assert len(m.list()) == 0

        jobs = Jobs().add_std({
            'name': 'date',
            'execution': {'exec': '/bin/date'},
            'resources': {'numNodes': {'exact': resources.total_nodes + 1}}
        })
        with pytest.raises(ConnectionError, match=r".*Not enough resources.*"):
            ids = m.submit(jobs)
        assert len(m.list()) == 0

        jobs = Jobs().add_std({
            'name': 'date',
            'execution': {'exec': '/bin/date', 'stdout': 'std.out'},
            'resources': {'numCores': {'exact': resources.total_cores}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED')
        assert jinfos['date'].total_cores == resources.total_cores
    finally:
        if m:
            m.finish()
            # m.stopManager()
            m.cleanup()
        rmtree(tmpdir)
def test_resume_tracker_files(tmpdir):
    try:
        m = LocalManager(['--log', 'debug', '--wd', tmpdir, '--report-format', 'json', '--nodes', '4'],
                         {'wdir': str(tmpdir)})

        job_req = {
            'name': 'host',
            'execution': {'exec': '/bin/date', 'stdout': 'out'},
            'resources': {'numCores': {'exact': 1}}
        }
        jobs = Jobs().add_std(job_req)
        submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED')

        time.sleep(1)

        aux_dir = find_single_aux_dir(str(tmpdir))

        print(f'aux_dir content: {str(listdir(aux_dir))}')
        assert all(exists(join(aux_dir, fname)) for fname in ['track.reqs', 'track.states']), \
            f"missing tracker files in {aux_dir}: {str(listdir(aux_dir))}"
    finally:
        if m:
            m.finish()
            m.cleanup()
        rmtree(tmpdir)
def test_slurmenv_api_submit_many_cores():
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()
    set_pythonpath_to_qcg_module()

    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    try:
        m = LocalManager(['--log', 'debug', '--wd', tmpdir, '--report-format', 'json'],
                         {'wdir': str(tmpdir)})

        jobs = Jobs().add_std({
            'name': 'host',
            'execution': {'exec': '/bin/hostname', 'args': ['--fqdn'], 'stdout': 'out'},
            'resources': {'numCores': {'exact': resources.total_cores}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED')

        # check that the job's working directory is inside the service's working directory
        assert tmpdir == jinfos['host'].wdir, str(jinfos['host'].wdir)
        assert all((len(jinfos['host'].nodes) == resources.total_nodes,
                    jinfos['host'].total_cores == resources.total_cores)), str(jinfos['host'])
    finally:
        if m:
            m.finish()
            # m.stopManager()
            m.cleanup()
        rmtree(tmpdir)
def test_slurmenv_api_submit_simple():
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()
    set_pythonpath_to_qcg_module()

    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    try:
        m = LocalManager(['--log', 'debug', '--wd', tmpdir, '--report-format', 'json'],
                         {'wdir': str(tmpdir)})

        jobs = Jobs().add_std({
            'name': 'host',
            'execution': {'exec': '/bin/hostname', 'args': ['--fqdn'],
                          'stdout': 'std.out', 'stderr': 'std.err'}
        })
        assert submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED')
    finally:
        if m:
            m.finish()
            # m.stopManager()
            m.cleanup()
        rmtree(tmpdir)
def test_slurmenv_api_cancel_kill_nl():
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()
    set_pythonpath_to_qcg_module()

    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    print(f'tmpdir: {tmpdir}')

    try:
        m = LocalManager(['--log', 'debug', '--wd', tmpdir, '--report-format', 'json'],
                         {'wdir': str(tmpdir)})

        iters = 10
        ids = m.submit(Jobs().
                       add(script='trap "" SIGTERM; sleep 30s', iteration=iters,
                           stdout='sleep.out.${it}', stderr='sleep.err.${it}', numCores=1))
        jid = ids[0]

        assert len(m.list()) == 1

        list_jid = list(m.list().keys())[0]
        assert list_jid == jid

        # wait for job to start executing
        sleep(2)

        m.cancel([jid])

        # wait for SIGTERM job cancel
        sleep(2)

        jinfos = m.info_parsed(ids)
        assert all((len(jinfos) == 1, jid in jinfos, jinfos[jid].status == 'QUEUED'))

        # wait for SIGKILL job cancel (~ExecutionJob.SIG_KILL_TIMEOUT)
        sleep(ExecutionJob.SIG_KILL_TIMEOUT)

        jinfos = m.info_parsed(ids, withChilds=True)
        assert all((len(jinfos) == 1, jid in jinfos, jinfos[jid].status == 'CANCELED'))

        # the canceled iterations are included in 'failed' entry in job statistics
        # the cancel status is presented in 'childs/state' entry
        assert all((jinfos[jid].iterations,
                    jinfos[jid].iterations.get('start', -1) == 0,
                    jinfos[jid].iterations.get('stop', 0) == iters,
                    jinfos[jid].iterations.get('total', 0) == iters,
                    jinfos[jid].iterations.get('finished', 0) == iters,
                    jinfos[jid].iterations.get('failed', -1) == iters))

        assert len(jinfos[jid].childs) == iters
        for iteration in range(iters):
            job_it = jinfos[jid].childs[iteration]
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format(jid, iteration),
                        job_it.status == 'CANCELED')), str(job_it)

        m.remove(jid)
    finally:
        m.finish()
        m.cleanup()
def test_local_manager_submit_simple(tmpdir):
    cores = 4

    # switch on debugging (by default in api.log file)
    m = LocalManager(['--wd', str(tmpdir), '--nodes', str(cores)], {'wdir': str(tmpdir)})

    try:
        res = m.resources()

        assert all(('total_nodes' in res, 'total_cores' in res,
                    res['total_nodes'] == 1, res['total_cores'] == cores))

        ids = m.submit(Jobs().
                       add(name='host', exec='/bin/hostname', args=['--fqdn'], stdout='host.stdout').
                       add(name='date', exec='/bin/date', stdout='date.stdout', numCores={'exact': 2}))

        assert len(m.list()) == 2

        m.wait4(ids)

        jinfos = m.info(ids)

        assert all(('jobs' in jinfos,
                    len(jinfos['jobs'].keys()) == 2,
                    'host' in jinfos['jobs'],
                    'date' in jinfos['jobs'],
                    jinfos['jobs']['host'].get('data', {}).get('status', '') == 'SUCCEED',
                    jinfos['jobs']['date'].get('data', {}).get('status', '') == 'SUCCEED'))

        aux_dir = find_single_aux_dir(str(tmpdir))

        assert all((exists(tmpdir.join('.qcgpjm-client', 'api.log')),
                    exists(join(aux_dir, 'service.log')),
                    exists(tmpdir.join('host.stdout')),
                    exists(tmpdir.join('date.stdout'))))
    finally:
        m.finish()
        # m.stopManager()
        m.cleanup()
def test_slurmenv_api_std_streams_many_cores():
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()
    set_pythonpath_to_qcg_module()

    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    try:
        m = LocalManager(['--log', 'debug', '--wd', tmpdir, '--report-format', 'json'],
                         {'wdir': str(tmpdir)})

        jobs = Jobs().add_std({
            'name': 'host',
            'execution': {'exec': 'cat', 'stdin': '/etc/system-release',
                          'stdout': 'out', 'stderr': 'err'},
            'resources': {'numCores': {'exact': 2}}
        })
        assert submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED')

        assert all((exists(join(tmpdir, 'out')), exists(join(tmpdir, 'err'))))

        with open(join(tmpdir, 'out'), 'rt') as out_f:
            out = out_f.read()
        with open(join('/etc/system-release'), 'rt') as sr_f:
            system_release = sr_f.read()

        assert system_release in out
    finally:
        if m:
            m.finish()
            # m.stopManager()
            m.cleanup()
        rmtree(tmpdir)
def test_resume_simple(tmpdir):
    try:
        ncores = 4
        m = LocalManager(['--log', 'debug', '--wd', tmpdir, '--report-format', 'json',
                          '--nodes', str(ncores)], {'wdir': str(tmpdir)})

        its = 10
        job_req = {
            'name': 'sleep',
            'execution': {'exec': '/bin/sleep', 'args': ['4s'], 'stdout': 'out'},
            'iteration': {'stop': its},
            'resources': {'numCores': {'exact': 1}}
        }
        jobs = Jobs().add_std(job_req)
        job_ids = m.submit(jobs)

        # because job iterations execute in order, once the 4th iteration finishes,
        # the three previous ones should also have finished
        m.wait4('sleep:3')
        jinfos = m.info_parsed(job_ids, withChilds=True)
        assert jinfos
        jinfo = jinfos['sleep']

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == ncores,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]

            exp_status = ['SUCCEED']
            if iteration > 3:
                exp_status = ['EXECUTING', 'SCHEDULED', 'QUEUED']
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format('sleep', iteration),
                        job_it.status in exp_status)), \
                f"{job_it.iteration} != {iteration}, " \
                f"{job_it.name} != {'{}:{}'.format('sleep', iteration)}, " \
                f"{job_it.status} != {exp_status}"

        # kill the manager process
        m.kill_manager_process()
        m.cleanup()

        ncores = 4
        m = LocalManager(['--log', 'debug', '--wd', tmpdir, '--report-format', 'json',
                          '--nodes', str(ncores), '--resume', tmpdir], {'wdir': str(tmpdir)})

        m.wait4all()
        jinfos = m.info_parsed(job_ids, withChilds=True)
        assert jinfos
        jinfo = jinfos['sleep']

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format('sleep', iteration),
                        job_it.status == 'SUCCEED')), \
                f"{job_it.iteration} != {iteration}, " \
                f"{job_it.name} != {'{}:{}'.format('sleep', iteration)}, " \
                f"{job_it.status} != SUCCEED"
    finally:
        if m:
            m.finish()
            m.cleanup()
def test_slurmenv_api_iteration_node_scheduling():
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    # TODO: it's hard to write comprehensive iteration node scheduling tests on only two nodes
    #  (in slurm's development docker)
    resources, allocation = get_slurm_resources_binded()
    set_pythonpath_to_qcg_module()

    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    try:
        m = LocalManager(['--log', 'debug', '--wd', tmpdir, '--report-format', 'json'],
                         {'wdir': str(tmpdir)})

        # in this case 'split-into' defaults to the number of iterations, so the total
        # available resources should be split into two partitions and each iteration
        # should run on its own partition
        jname = 'host'
        its = 2
        jobs = Jobs().add_std({
            'name': jname,
            'iteration': {'stop': its},
            'execution': {'exec': 'sleep', 'args': ['2s'],
                          'stdout': 'out_${it}', 'stderr': 'err_${it}'},
            'resources': {'numCores': {'exact': resources.nodes[0].total},
                          'numNodes': {'min': 1, 'scheduler': {'name': 'split-into'}}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED', withChilds=True)

        assert jinfos
        jinfo = jinfos[jname]

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format(jname, iteration),
                        job_it.total_cores == resources.nodes[0].total,
                        len(job_it.nodes) == 1)), str(job_it)

        # all iterations have been scheduled across all nodes
        assert sum([len(child.nodes) for child in jinfo.childs]) == resources.total_nodes
        # the iterations should execute on different nodes
        assert list(jinfo.childs[0].nodes)[0] != list(jinfo.childs[1].nodes)[0]

        # we explicitly set the 'split-into' parameter to 2; the behavior should be the
        # same as in the previous example
        jname = 'host2'
        its = 2
        jobs = Jobs().add_std({
            'name': jname,
            'iteration': {'stop': its},
            'execution': {'exec': 'sleep', 'args': ['2s'], 'stdout': 'out'},
            'resources': {'numCores': {'exact': resources.nodes[0].total},
                          'numNodes': {'min': 1,
                                       'scheduler': {'name': 'split-into', 'params': {'parts': 2}}}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED', withChilds=True)

        assert jinfos
        jinfo = jinfos[jname]

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format(jname, iteration),
                        job_it.total_cores == resources.nodes[0].total,
                        len(job_it.nodes) == 1)), str(job_it)

        # all iterations have been scheduled across all nodes
        assert sum([len(child.nodes) for child in jinfo.childs]) == resources.total_nodes
        # the iterations should execute on different nodes
        assert list(jinfo.childs[0].nodes)[0] != list(jinfo.childs[1].nodes)[0]

        # the 'maximum-iters' scheduler tries to launch as many iterations at the same
        # time as possible on all available resources
        jname = 'host3'
        its = 4
        jobs = Jobs().add_std({
            'name': jname,
            'iteration': {'stop': its},
            'execution': {'exec': 'sleep', 'args': ['2s'], 'stdout': 'out'},
            'resources': {'numCores': {'exact': resources.nodes[0].total},
                          'numNodes': {'min': 1, 'scheduler': {'name': 'maximum-iters'}}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED', withChilds=True)

        assert jinfos
        jinfo = jinfos[jname]

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            print('job iteration {}: {}'.format(iteration, str(job_it)))
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format(jname, iteration),
                        job_it.total_cores == resources.nodes[0].total,
                        len(job_it.nodes) == 1)), str(job_it)

        assert sum([len(child.nodes) for child in jinfo.childs]) == its
    finally:
        if m:
            m.finish()
            # m.stopManager()
            m.cleanup()
        rmtree(tmpdir)
def test_slurmenv_api_iteration_core_scheduling():
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()
    set_pythonpath_to_qcg_module()

    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    try:
        m = LocalManager(['--log', 'debug', '--wd', tmpdir, '--report-format', 'json'],
                         {'wdir': str(tmpdir)})

        # in this case 'split-into' defaults to the number of iterations, so the total
        # available resources should be split into two partitions and each iteration
        # should run on its own partition
        jname = 'host'
        its = 2
        jobs = Jobs().add_std({
            'name': jname,
            'iteration': {'stop': its},
            'execution': {'exec': 'hostname', 'args': ['--fqdn'], 'stdout': 'out'},
            'resources': {'numCores': {'min': 1, 'scheduler': {'name': 'split-into'}}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED', withChilds=True)

        assert jinfos
        jinfo = jinfos[jname]

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            print('job iteration {}: {}'.format(iteration, str(job_it)))
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format(jname, iteration),
                        job_it.total_cores >= 1,
                        job_it.total_cores < resources.total_cores)), str(job_it)

        # all iterations have been scheduled across all resources
        assert sum([child.total_cores for child in jinfo.childs]) == resources.total_cores
        assert all(child.total_cores == resources.total_cores / its for child in jinfo.childs)

        # we explicitly set the 'split-into' parameter to 2; the behavior should be the
        # same as in the previous example
        jname = 'host2'
        its = 2
        jobs = Jobs().add_std({
            'name': jname,
            'iteration': {'stop': its},
            'execution': {'exec': 'hostname', 'args': ['--fqdn'], 'stdout': 'out'},
            'resources': {'numCores': {'min': 1,
                                       'scheduler': {'name': 'split-into', 'params': {'parts': 2}}}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED', withChilds=True)

        assert jinfos
        jinfo = jinfos[jname]

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            print('job iteration {}: {}'.format(iteration, str(job_it)))
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format(jname, iteration),
                        job_it.total_cores >= 1,
                        job_it.total_cores < resources.total_cores)), str(job_it)

        # all iterations have been scheduled across all resources
        assert sum([child.total_cores for child in jinfo.childs]) == resources.total_cores
        assert all(child.total_cores == resources.total_cores / 2 for child in jinfo.childs)

        # we explicitly set the 'split-into' parameter to 4, so the two iterations should
        # be scheduled on half of the available resources
        jname = 'host3'
        its = 2
        jobs = Jobs().add_std({
            'name': jname,
            'iteration': {'stop': its},
            'execution': {'exec': 'hostname', 'args': ['--fqdn'], 'stdout': 'out'},
            'resources': {'numCores': {'min': 1,
                                       'scheduler': {'name': 'split-into', 'params': {'parts': 4}}}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED', withChilds=True)

        assert jinfos
        jinfo = jinfos[jname]

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            print('job iteration {}: {}'.format(iteration, str(job_it)))
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format(jname, iteration),
                        job_it.total_cores >= 1,
                        job_it.total_cores < resources.total_cores)), str(job_it)

        # all iterations have been scheduled across half of the resources
        assert sum([child.total_cores for child in jinfo.childs]) == resources.total_cores / 2
        assert all(child.total_cores == resources.total_cores / 4 for child in jinfo.childs)

        # we explicitly set the 'split-into' parameter to 2, but the number of iterations
        # is larger than the number of simultaneously available partitions, so the
        # iterations should be executed serially (in parts)
        jname = 'host4'
        its = 10
        jobs = Jobs().add_std({
            'name': jname,
            'iteration': {'stop': its},
            'execution': {'exec': 'hostname', 'args': ['--fqdn'], 'stdout': 'out'},
            'resources': {'numCores': {'min': 1,
                                       'scheduler': {'name': 'split-into', 'params': {'parts': 2}}}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED', withChilds=True)

        assert jinfos
        jinfo = jinfos[jname]

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            print('job iteration {}: {}'.format(iteration, str(job_it)))
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format(jname, iteration),
                        job_it.total_cores >= 1,
                        job_it.total_cores < resources.total_cores)), str(job_it)

        assert all(child.total_cores == resources.total_cores / 2 for child in jinfo.childs)

        # the 'maximum-iters' scheduler tries to launch as many iterations at the same
        # time as possible on all available resources
        jname = 'host5'
        its = 2
        jobs = Jobs().add_std({
            'name': jname,
            'iteration': {'stop': its},
            'execution': {'exec': 'sleep', 'args': ['2s'], 'stdout': 'out'},
            'resources': {'numCores': {'min': 1, 'scheduler': {'name': 'maximum-iters'}}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED', withChilds=True)

        assert jinfos
        jinfo = jinfos[jname]

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            print('job iteration {}: {}'.format(iteration, str(job_it)))
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format(jname, iteration),
                        job_it.total_cores >= 1,
                        job_it.total_cores < resources.total_cores)), str(job_it)

        assert sum([child.total_cores for child in jinfo.childs]) == resources.total_cores

        # the 'maximum-iters' scheduler tries to launch as many iterations at the same
        # time as possible on all available resources
        jname = 'host6'
        its = resources.total_cores
        jobs = Jobs().add_std({
            'name': jname,
            'iteration': {'stop': its},
            'execution': {'exec': 'sleep', 'args': ['2s'], 'stdout': 'out'},
            'resources': {'numCores': {'min': 1, 'scheduler': {'name': 'maximum-iters'}}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED', withChilds=True)

        assert jinfos
        jinfo = jinfos[jname]

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            print('job iteration {}: {}'.format(iteration, str(job_it)))
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format(jname, iteration),
                        job_it.total_cores >= 1,
                        job_it.total_cores < resources.total_cores)), str(job_it)

        assert sum([child.total_cores for child in jinfo.childs]) == resources.total_cores

        # when the number of iterations exceeds the number of available resources, the
        # 'maximum-iters' scheduler splits the iterations into 'steps' minimizing their
        # number, and allocates as many resources as possible for each iteration in a step
        jname = 'host7'
        its = resources.total_cores
        jobs = Jobs().add_std({
            'name': jname,
            'iteration': {'stop': its},
            'execution': {'exec': 'sleep', 'args': ['2s'], 'stdout': 'out'},
            'resources': {'numCores': {'min': 1, 'scheduler': {'name': 'maximum-iters'}}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED', withChilds=True)

        assert jinfos
        jinfo = jinfos[jname]

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            print('job iteration {}: {}'.format(iteration, str(job_it)))
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format(jname, iteration),
                        job_it.total_cores >= 1,
                        job_it.total_cores < resources.total_cores)), str(job_it)

        assert (child.total_cores == 1 for child in jinfo.childs)
        assert sum([child.total_cores for child in jinfo.childs]) == resources.total_cores

        # when the number of iterations exceeds the number of available resources, the
        # 'maximum-iters' scheduler splits the iterations into 'steps' minimizing their
        # number, and allocates as many resources as possible for each iteration in a step
        jname = 'host8'
        its = resources.total_cores * 2
        jobs = Jobs().add_std({
            'name': jname,
            'iteration': {'stop': its},
            'execution': {'exec': 'sleep', 'args': ['2s'], 'stdout': 'out'},
            'resources': {'numCores': {'min': 1, 'scheduler': {'name': 'maximum-iters'}}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED', withChilds=True)

        assert jinfos
        jinfo = jinfos[jname]

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            print('job iteration {}: {}'.format(iteration, str(job_it)))
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format(jname, iteration),
                        job_it.total_cores >= 1,
                        job_it.total_cores < resources.total_cores)), str(job_it)

        assert (child.total_cores == 1 for child in jinfo.childs)
        assert sum([child.total_cores for child in jinfo.childs]) == resources.total_cores * 2

        # when the number of iterations exceeds the number of available resources, the
        # 'maximum-iters' scheduler splits the iterations into 'steps' minimizing their
        # number, and allocates as many resources as possible for each iteration in a step
        jname = 'host9'
        its = resources.total_cores + 1
        jobs = Jobs().add_std({
            'name': jname,
            'iteration': {'stop': its},
            'execution': {'exec': 'sleep', 'args': ['2s'], 'stdout': 'out'},
            'resources': {'numCores': {'min': 1, 'scheduler': {'name': 'maximum-iters'}}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED', withChilds=True)

        assert jinfos
        jinfo = jinfos[jname]

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            print('job iteration {}: {}'.format(iteration, str(job_it)))
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format(jname, iteration),
                        job_it.total_cores >= 1)), str(job_it)

        assert (child.total_cores == 1 for child in jinfo.childs)
        # all iterations are split into two 'steps', and in each step the assigned
        # iterations should use the maximum available resources
        assert sum([child.total_cores for child in jinfo.childs]) == resources.total_cores * 2

        # in this case, where two iterations can't fit on the resources at once, all the
        # iterations should be scheduled serially on all available resources
        jname = 'host10'
        its = resources.total_nodes
        jobs = Jobs().add_std({
            'name': jname,
            'iteration': {'stop': its},
            'execution': {'exec': 'sleep', 'args': ['2s'], 'stdout': 'out'},
            'resources': {'numCores': {'min': resources.total_cores - 1,
                                       'scheduler': {'name': 'maximum-iters'}}}
        })
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED', withChilds=True)

        assert jinfos
        jinfo = jinfos[jname]

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            print('job iteration {}: {}'.format(iteration, str(job_it)))
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format(jname, iteration),
                        job_it.total_cores == resources.total_cores,
                        len(job_it.nodes) == resources.total_nodes)), str(job_it)
    finally:
        if m:
            m.finish()
            # m.stopManager()
            m.cleanup()
        rmtree(tmpdir)
def test_slurmenv_api_submit_resource_ranges():
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()
    set_pythonpath_to_qcg_module()

    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    try:
        m = LocalManager(['--log', 'debug', '--wd', tmpdir, '--report-format', 'json'],
                         {'wdir': str(tmpdir)})

        jobs = Jobs().add_std({
            'name': 'host',
            'execution': {'exec': '/bin/hostname', 'args': ['--fqdn'], 'stdout': 'out'},
            'resources': {'numCores': {'min': 1}}
        })

        # the job should fail because of the missing 'max' parameter
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'FAILED')
        jinfo = jinfos['host']
        assert "Both core's range boundaries (min, max) must be defined" in jinfo.messages, str(jinfo)

        jobs = Jobs().add_std({
            'name': 'host2',
            'execution': {'exec': '/bin/hostname', 'args': ['--fqdn'], 'stdout': 'out'},
            'resources': {'numNodes': {'exact': 1},
                          'numCores': {'min': 1, 'max': resources.nodes[0].total + 1}}
        })

        # the job should run on a single node (the first free one) with all available cores
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED')
        jinfo = jinfos['host2']
        assert all((len(jinfo.nodes) == 1, jinfo.total_cores == resources.nodes[0].total)), str(jinfo)

        jobs = Jobs().add_std({
            'name': 'host3',
            'execution': {'exec': '/bin/hostname', 'args': ['--fqdn'], 'stdout': 'out'},
            'resources': {'numCores': {'min': 1, 'max': resources.nodes[0].total + 1}}
        })

        # the job should run on at least two nodes, with the given maximum total number of cores
        jinfos = submit_2_manager_and_wait_4_info(m, jobs, 'SUCCEED')
        jinfo = jinfos['host3']
        assert all((len(jinfo.nodes) == 2,
                    jinfo.total_cores == resources.nodes[0].total + 1)), str(jinfo)
    finally:
        if m:
            m.finish()
            # m.stopManager()
            m.cleanup()
        rmtree(tmpdir)
######################################################################
config PilotJob
######################################################################
"""
from qcg.pilotjob.api.manager import Manager
from qcg.pilotjob.api.manager import LocalManager
from qcg.pilotjob.api.job import Jobs

# m = LocalManager(cfg={'log_level': 'DEBUG'}, server_args=['--log', 'debug'])
m = LocalManager()

# get available resources
print("\n\navailable resources:\n%s\n" % str(m.resources()))

# submit jobs and save their names in 'ids' list
jobs = Jobs()

print("Start Adding jobs . . .\n\n")

INSTANCE_INDEX = 0
for i in range(NUM_INSTANCES):
    for SUBMODEL in ['macro', 'micro']:
        cmd = '%s run_mscale.py --submodel %s --data_dir=%s --instance_index %d --coupling_type %s --num_instances %d --weather_coupling %s' % (
            PYTHON_CMD, SUBMODEL, DATA_DIR, INSTANCE_INDEX, COUPLING_TYPE, NUM_INSTANCES, WEATHER_COUPLING)
        print("\tAdd job with cmd = %s" % (cmd))

        TaskID = 'TaskID%d_%s' % (INSTANCE_INDEX + 1, SUBMODEL)
        stderr = 'log_task/%s_${jname}__${uniq}.stderr' % (TaskID)
        stdout = 'log_task/%s_${jname}__${uniq}.stdout' % (TaskID)
from qcg.pilotjob.api.manager import Manager
from qcg.pilotjob.api.manager import LocalManager
from qcg.pilotjob.api.job import Jobs

m = LocalManager(cfg={'log_level': 'DEBUG'}, server_args=['--log', 'debug'])
# m = Manager(cfg={'log_level': 'DEBUG'}, server_args=['--log', 'debug'])

# get available resources
print("available resources:\n%s\n" % str(m.resources()))

# submit jobs and save their names in 'ids' list
jobs = Jobs()

$submitted_jobs_list

ids = m.submit(jobs)

# wait until submitted jobs finish
m.wait4(ids)

# get detailed information about submitted and finished jobs
print("jobs details:\n%s\n" % str(m.info(ids)))

m.finish()
m.kill_manager_process()
m.cleanup()
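# For reference only: a sketch of the kind of block a generator might substitute for
# the $submitted_jobs_list placeholder above. The task name, executable, arguments and
# core count are purely illustrative, not values taken from the original template:
#
#     jobs.add(name='TaskID1_macro',
#              exec='python3',
#              args=['run_mscale.py', '--submodel', 'macro'],
#              stdout='log_task/TaskID1_macro_${jname}__${uniq}.stdout',
#              stderr='log_task/TaskID1_macro_${jname}__${uniq}.stderr',
#              numCores={'exact': 1})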