import tempfile
from os.path import exists, join
from shutil import rmtree
from time import sleep

import pytest

from qcg.pilotjob.api.manager import LocalManager
from qcg.pilotjob.api.job import Jobs

# the slurm helpers (in_slurm_allocation, get_num_slurm_nodes, get_slurm_resources_binded,
# set_pythonpath_to_qcg_module, SHARED_PATH) and find_single_aux_dir are assumed to be
# imported from the test utilities of this suite


def test_slurmenv_api_cancel_nl():
    if not in_slurm_allocation() or get_num_slurm_nodes() < 2:
        pytest.skip('test not run in slurm allocation or allocation is smaller than 2 nodes')

    resources, allocation = get_slurm_resources_binded()
    set_pythonpath_to_qcg_module()

    tmpdir = str(tempfile.mkdtemp(dir=SHARED_PATH))
    print(f'tmpdir: {tmpdir}')

    m = None
    try:
        m = LocalManager(['--log', 'debug', '--wd', tmpdir, '--report-format', 'json'],
                         {'wdir': str(tmpdir)})

        iters = 10
        ids = m.submit(Jobs()
                       .add(exec='/bin/sleep', args=['5s'], iteration=iters,
                            stdout='sleep.out.${it}', stderr='sleep.err.${it}', numCores=1))
        jid = ids[0]
        assert len(m.list()) == 1

        list_jid = list(m.list().keys())[0]
        assert list_jid == jid

        # wait for the job to start executing
        sleep(2)

        m.cancel([jid])
        m.wait4(m.list())

        jinfos = m.info_parsed(ids, withChilds=True)
        assert all((len(jinfos) == 1, jid in jinfos, jinfos[jid].status == 'CANCELED'))

        # the canceled iterations are included in the 'failed' entry of the job statistics,
        # while the cancel status itself is reported in the 'childs/state' entries
        assert all((jinfos[jid].iterations,
                    jinfos[jid].iterations.get('start', -1) == 0,
                    jinfos[jid].iterations.get('stop', 0) == iters,
                    jinfos[jid].iterations.get('total', 0) == iters,
                    jinfos[jid].iterations.get('finished', 0) == iters,
                    jinfos[jid].iterations.get('failed', -1) == iters))
        assert len(jinfos[jid].childs) == iters

        for iteration in range(iters):
            job_it = jinfos[jid].childs[iteration]
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format(jid, iteration),
                        job_it.status == 'CANCELED')), str(job_it)

        m.remove(jid)
    finally:
        if m:
            m.finish()
            m.cleanup()
        rmtree(tmpdir)
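
# the cancel flow exercised above, in isolation: a minimal sketch, not collected by
# pytest (leading underscore); the working directory default is an illustrative assumption
def _example_cancel(wdir='/tmp/qcg-cancel-example'):
    m = LocalManager(['--wd', wdir], {'wdir': wdir})
    try:
        ids = m.submit(Jobs().add(exec='/bin/sleep', args=['60s'], numCores=1))
        m.cancel(ids)
        m.wait4(ids)
        # canceled jobs end up in the 'CANCELED' status, as asserted in the test above
        print(m.info_parsed(ids)[ids[0]].status)
    finally:
        m.finish()
        m.cleanup()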

def test_local_manager_submit_simple(tmpdir):
    cores = 4

    # switch on debugging (by default in api.log file)
    m = LocalManager(['--wd', str(tmpdir), '--nodes', str(cores)], {'wdir': str(tmpdir)})

    try:
        res = m.resources()

        assert all(('total_nodes' in res, 'total_cores' in res,
                    res['total_nodes'] == 1, res['total_cores'] == cores))

        ids = m.submit(Jobs()
                       .add(name='host', exec='/bin/hostname', args=['--fqdn'], stdout='host.stdout')
                       .add(name='date', exec='/bin/date', stdout='date.stdout', numCores={'exact': 2}))

        assert len(m.list()) == 2

        m.wait4(ids)

        jinfos = m.info(ids)

        assert all(('jobs' in jinfos,
                    len(jinfos['jobs'].keys()) == 2,
                    'host' in jinfos['jobs'],
                    'date' in jinfos['jobs'],
                    jinfos['jobs']['host'].get('data', {}).get('status', '') == 'SUCCEED',
                    jinfos['jobs']['date'].get('data', {}).get('status', '') == 'SUCCEED'))

        aux_dir = find_single_aux_dir(str(tmpdir))

        assert all((exists(tmpdir.join('.qcgpjm-client', 'api.log')),
                    exists(join(aux_dir, 'service.log')),
                    exists(tmpdir.join('host.stdout')),
                    exists(tmpdir.join('date.stdout'))))
    finally:
        m.finish()
        # m.stopManager()
        m.cleanup()
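
# a minimal standalone sketch of the same submit/wait/info flow outside pytest, not
# collected as a test; the working directory default is an illustrative assumption
def _example_minimal_submit(wdir='/tmp/qcg-example'):
    m = LocalManager(['--wd', wdir, '--nodes', '2'], {'wdir': wdir})
    try:
        ids = m.submit(Jobs().add(name='echo', exec='/bin/echo', args=['hello'], stdout='echo.stdout'))
        m.wait4(ids)
        print(m.info(ids))
    finally:
        m.finish()
        m.cleanup()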

def test_resume_simple(tmpdir):
    m = None
    try:
        ncores = 4
        m = LocalManager(['--log', 'debug', '--wd', tmpdir, '--report-format', 'json',
                          '--nodes', str(ncores)], {'wdir': str(tmpdir)})

        its = 10
        job_req = {
            'name': 'sleep',
            'execution': {
                'exec': '/bin/sleep',
                'args': ['4s'],
                'stdout': 'out',
            },
            'iteration': {'stop': its},
            'resources': {'numCores': {'exact': 1}}
        }
        jobs = Jobs().add_std(job_req)
        job_ids = m.submit(jobs)

        # because job iterations execute in order, when the 4th iteration finishes
        # the three previous ones should also have finished
        m.wait4('sleep:3')
        jinfos = m.info_parsed(job_ids, withChilds=True)
        assert jinfos
        jinfo = jinfos['sleep']

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == ncores,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]

            exp_status = ['SUCCEED']
            if iteration > 3:
                exp_status = ['EXECUTING', 'SCHEDULED', 'QUEUED']
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format('sleep', iteration),
                        job_it.status in exp_status)), \
                f"{job_it.iteration} != {iteration}, {job_it.name} != {'{}:{}'.format('sleep', iteration)}, " \
                f"{job_it.status} != {exp_status}"

        # kill the manager process to simulate an interrupted service
        m.kill_manager_process()
        m.cleanup()

        # restart the manager with '--resume' pointing at the previous working directory,
        # so it picks up the persisted state and completes the unfinished iterations
        m = LocalManager(['--log', 'debug', '--wd', tmpdir, '--report-format', 'json',
                          '--nodes', str(ncores), '--resume', tmpdir], {'wdir': str(tmpdir)})

        m.wait4all()

        jinfos = m.info_parsed(job_ids, withChilds=True)
        assert jinfos
        jinfo = jinfos['sleep']

        assert all((jinfo.iterations,
                    jinfo.iterations.get('start', -1) == 0,
                    jinfo.iterations.get('stop', 0) == its,
                    jinfo.iterations.get('total', 0) == its,
                    jinfo.iterations.get('finished', 0) == its,
                    jinfo.iterations.get('failed', -1) == 0)), str(jinfo)
        assert len(jinfo.childs) == its

        for iteration in range(its):
            job_it = jinfo.childs[iteration]
            assert all((job_it.iteration == iteration,
                        job_it.name == '{}:{}'.format('sleep', iteration),
                        job_it.status == 'SUCCEED')), \
                f"{job_it.iteration} != {iteration}, {job_it.name} != {'{}:{}'.format('sleep', iteration)}, " \
                f"{job_it.status} != SUCCEED"
    finally:
        if m:
            m.finish()
            m.cleanup()
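
# the resume pattern above in isolation: a minimal sketch, not collected by pytest;
# 'prev_wdir' is assumed to hold the working directory of a manager instance that
# was killed before all of its jobs finished
def _example_resume(prev_wdir):
    m = LocalManager(['--wd', prev_wdir, '--resume', prev_wdir], {'wdir': prev_wdir})
    try:
        # the restarted service reschedules the outstanding iterations and runs them to completion
        m.wait4all()
    finally:
        m.finish()
        m.cleanup()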

cmd = '%s run_mscale.py --submodel %s --data_dir=%s --instance_index %d --coupling_type %s ' \
      '--num_instances %d --weather_coupling %s' % (
          PYTHON_CMD, SUBMODEL, DATA_DIR, INSTANCE_INDEX, COUPLING_TYPE, NUM_INSTANCES,
          WEATHER_COUPLING)
print("\tAdd job with cmd = %s" % cmd)

TaskID = 'TaskID%d_%s' % (INSTANCE_INDEX + 1, SUBMODEL)
stderr = 'log_task/%s_${jname}__${uniq}.stderr' % TaskID
stdout = 'log_task/%s_${jname}__${uniq}.stdout' % TaskID

jobs.add(name=TaskID, exec='bash', args=['-c', cmd], stdout=stdout, stderr=stderr,
         numCores={'exact': INSTANCE_CORES}, model='default')
INSTANCE_INDEX = INSTANCE_INDEX + 1

ids = m.submit(jobs)

# wait until the submitted jobs finish
m.wait4(ids)

# get detailed information about the submitted and finished jobs
print("jobs details:\n%s\n" % str(m.info(ids)))

m.finish()
m.kill_manager_process()
m.cleanup()
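
# the job-building fragment above presumably sits inside a loop over submodel instances;
# it assumes earlier setup along these lines (a sketch only: the upper-case names come
# from the example script, the concrete values here are illustrative assumptions):
#
#   m = LocalManager()
#   jobs = Jobs()
#   PYTHON_CMD, SUBMODEL, DATA_DIR = 'python3', 'macro', '/path/to/data'
#   COUPLING_TYPE, NUM_INSTANCES, WEATHER_COUPLING = 'mto', 2, 'False'
#   INSTANCE_CORES, INSTANCE_INDEX = 1, 0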