def test_update(tmpdir, executable, comm, Process, do_multiple_mpi_programs):
    """Check that updating a running JobFolderProcess merges new jobs and all succeed.

    Starts a keep-alive process on three jobs, updates it with three more,
    verifies the merged job-folder contains every key from both sets, then
    waits for completion and checks mass extraction reports success for all six.
    """
    from copy import deepcopy
    from pylada.jobfolder.massextract import MassExtract
    from pylada.jobfolder import save

    # Create and start computing the first set of jobs.
    root = jobfolders(executable, 0, 3)
    program = Process(tmpdir, deepcopy(root), keepalive=True)
    assert program.keepalive
    program.start(comm)
    assert hasattr(program, '_comm')

    # Update the job-folder and start computing the second set of jobs.
    supp = jobfolders(executable, 3, 6)
    program.update(supp)
    for key in supp.keys():
        assert key in program.jobfolder
    for key in root.keys():
        assert key in program.jobfolder
    assert len(program.jobfolder) == len(root) + len(supp)

    # Create the mass-extraction object and check success.
    save(program.jobfolder, str(tmpdir.join('dict.dict')), overwrite=True)
    extract = MassExtract(str(tmpdir.join('dict.dict')))

    # Wait for job completion and check for success.
    program.wait()
    assert len(extract.success.values()) == 6
    # FIX: dropped the redundant generator wrapper `all((u for u in ...))`.
    assert all(extract.success.values())
def test_update_with_fail(tmpdir, executable, comm, Process, do_multiple_mpi_programs):
    """Tests JobFolderProcess with update and failure.

    Configures one failing job in each of two job sets, then checks that
    ``wait`` raises ``Fail`` after each start/update cycle and that the
    error count accumulates across the update.

    NOTE(review): a later definition with the same name shadows this test
    at collection time — one of the two duplicates should be renamed.
    """
    from pytest import raises
    from pylada.process import Fail
    # FIX: removed unused function-level imports (MassExtract, save).

    # First set: job '1' is configured to fail at the end of its run.
    root = jobfolders(executable, 0, 3)
    root['1'].functional.order = 68
    root['1'].functional.fail = 'end'
    root['1'].sleep = None

    # Second set: job '5' is configured to fail midway through its run.
    supp = jobfolders(executable, 3, 6)
    supp['5'].sleep = 0
    supp['5'].functional.order = 78
    supp['5'].functional.fail = 'midway'

    program = Process(tmpdir, root, keepalive=True)

    # Compute current jobs; the failing job must surface as a Fail.
    program.start(comm)
    with raises(Fail):
        program.wait()
    assert len(program.errors) == 1

    # Compute the second, updated set of jobs; errors accumulate.
    program.update(supp)
    with raises(Fail):
        program.wait()
    assert len(program.errors) == 2
def test_update_with_fail(tmpdir, executable, comm, Process):
    """Tests JobFolderProcess with update and failure.

    Configures one failing job in each of two job sets, then checks that
    ``wait`` raises ``Fail`` after each start/update cycle and that the
    error count accumulates across the update.

    NOTE(review): this duplicates an earlier test of the same name (differing
    only in fixture list) and shadows it at collection — consider renaming.
    """
    from pytest import raises
    from pylada.process import Fail
    # FIX: removed unused function-level imports (MassExtract, save).

    # First set: job '1' is configured to fail at the end of its run.
    root = jobfolders(executable, 0, 3)
    root['1'].functional.order = 68
    root['1'].functional.fail = 'end'
    root['1'].sleep = None

    # Second set: job '5' is configured to fail midway through its run.
    supp = jobfolders(executable, 3, 6)
    supp['5'].sleep = 0
    supp['5'].functional.order = 78
    supp['5'].functional.fail = 'midway'

    program = Process(tmpdir, root, keepalive=True)

    # Compute current jobs; the failing job must surface as a Fail.
    program.start(comm)
    with raises(Fail):
        program.wait()
    assert len(program.errors) == 1

    # Compute the second, updated set of jobs; errors accumulate.
    program.update(supp)
    with raises(Fail):
        program.wait()
    assert len(program.errors) == 2
def test_update(tmpdir, executable, comm, Process):
    """Check that updating a running JobFolderProcess merges new jobs and all succeed.

    NOTE(review): this redefines an earlier test of the same name (which
    requests an extra fixture) and shadows it at collection — consider
    renaming one of the two.
    """
    from copy import deepcopy
    from pylada.jobfolder.massextract import MassExtract
    from pylada.jobfolder import save

    # Create and start computing the first set of jobs.
    root = jobfolders(executable, 0, 3)
    program = Process(tmpdir, deepcopy(root), keepalive=True)
    assert program.keepalive
    program.start(comm)
    assert hasattr(program, '_comm')

    # Update the job-folder and start computing the second set of jobs.
    supp = jobfolders(executable, 3, 6)
    program.update(supp)
    for key in supp.keys():
        assert key in program.jobfolder
    for key in root.keys():
        assert key in program.jobfolder
    assert len(program.jobfolder) == len(root) + len(supp)

    # Create the mass-extraction object and check success.
    save(program.jobfolder, str(tmpdir.join('dict.dict')), overwrite=True)
    extract = MassExtract(str(tmpdir.join('dict.dict')))

    # Wait for job completion and check for success.
    program.wait()
    assert len(extract.success.values()) == 6
    # FIX: dropped the redundant generator wrapper `all((u for u in ...))`.
    assert all(extract.success.values())
def test_update_with_deleteold(tmpdir, executable, comm, Process):
    """After completion, ``update(deleteold=True)`` keeps only the new jobs."""
    # Create and start computing the first set of jobs.
    first = jobfolders(executable, 0, 3)
    runner = Process(tmpdir, first, keepalive=True)
    runner.start(comm)

    second = jobfolders(executable, 3, 6)

    # Wait for completion of the current jobs, then check that updating with
    # deleteold=True leaves only the uncompleted (newly added) jobs behind.
    runner.wait()
    runner.update(second, deleteold=True)
    assert all(key in runner.jobfolder for key in second.keys())
    assert len(runner.jobfolder) == len(second)
def test_update_with_deleteold(tmpdir, executable, comm, Process):
    """After completion, ``update(deleteold=True)`` keeps only the new jobs.

    NOTE(review): duplicates an earlier test of the same name and shadows it
    at collection — consider renaming one of the two.
    """
    # Launch the initial batch of jobs with a keep-alive process.
    initial = jobfolders(executable, 0, 3)
    proc = Process(tmpdir, initial, keepalive=True)
    proc.start(comm)

    replacement = jobfolders(executable, 3, 6)

    # Once the initial batch finishes, an update that deletes old jobs should
    # leave the folder containing exactly the replacement set.
    proc.wait()
    proc.update(replacement, deleteold=True)
    for name in replacement.keys():
        assert name in proc.jobfolder
    assert len(proc.jobfolder) == len(replacement)
def test_getjobs(comm, tmpdir, executable, nprocs, njobs):
    """Test scheduling: allocations chosen by ``_getjobs`` never exceed the comm size."""
    root = jobfolders(executable, 0, 8)

    def processalloc(job):
        """Return a random processor count between 1 and comm['n'] included."""
        from random import randint
        return randint(1, comm['n'])

    # Repeat with fresh random allocations; every scheduling decision must
    # stay within the communicator's processor budget.
    for _ in range(100):
        program = PoolProcess(root, processalloc=processalloc, outdir=str(tmpdir))
        program._comm = comm
        for _ in range(1000):
            jobs = program._getjobs()
            requested = sum(program._alloc[u] for u in jobs)
            assert requested <= program._comm['n'], \
                (jobs, [program._alloc[u] for u in jobs])
def test_failures(tmpdir, executable, comm):
    """Tests whether scheduling jobs works on known failure cases.

    Uses a fixed per-job allocation table that previously exposed scheduling
    bugs and verifies ``_getjobs`` never over-allocates the communicator.
    """
    # FIX: removed unused function-level imports (default_comm, Functional).
    root = jobfolders(executable, 0, 8)

    def processalloc_test1(job):
        """Fixed allocation table for a known failure case."""
        # job.name[1:-1] strips the surrounding path separators to get the
        # bare job index used as the table key.
        d = {'1': 1, '0': 3, '3': 3, '2': 3, '5': 3, '4': 2, '7': 2, '6': 1}
        return d[job.name[1:-1]]

    program = PoolProcess(root, processalloc=processalloc_test1, outdir=str(tmpdir))
    program._comm = comm
    for _ in range(10000):
        jobs = program._getjobs()
        assert sum(program._alloc[u] for u in jobs) <= program._comm['n'], \
            (jobs, [program._alloc[u] for u in jobs])
def test_getjobs(comm, tmpdir, executable, nprocs, njobs):
    """Test scheduling: allocations chosen by ``_getjobs`` never exceed the comm size.

    NOTE(review): duplicates an earlier test of the same name and shadows it
    at collection — consider renaming one of the two.
    """
    folder = jobfolders(executable, 0, 8)

    def processalloc(job):
        """Return a random processor count between 1 and comm['n'] included."""
        from random import randint
        return randint(1, comm['n'])

    # Many rounds of random allocation; each scheduling decision must fit
    # within the communicator's processor count.
    for _ in range(100):
        pool = PoolProcess(folder, processalloc=processalloc, outdir=str(tmpdir))
        pool._comm = comm
        for _ in range(1000):
            picked = pool._getjobs()
            assert sum(pool._alloc[u] for u in picked) <= pool._comm['n'], \
                (picked, [pool._alloc[u] for u in picked])
def test_failures(tmpdir, executable, comm):
    """Tests whether scheduling jobs works on known failure cases.

    NOTE(review): duplicates an earlier test of the same name and shadows it
    at collection — consider renaming one of the two.
    """
    # FIX: removed unused function-level imports (default_comm, Functional).
    root = jobfolders(executable, 0, 8)

    def processalloc_test1(job):
        """Fixed allocation table for a known failure case."""
        # job.name[1:-1] strips the surrounding path separators to get the
        # bare job index used as the table key.
        d = {'1': 1, '0': 3, '3': 3, '2': 3, '5': 3, '4': 2, '7': 2, '6': 1}
        return d[job.name[1:-1]]

    program = PoolProcess(root, processalloc=processalloc_test1, outdir=str(tmpdir))
    program._comm = comm
    for _ in range(10000):
        jobs = program._getjobs()
        assert sum(program._alloc[u] for u in jobs) <= program._comm['n'], \
            (jobs, [program._alloc[u] for u in jobs])
def root(executable):
    """Build the standard eight-job folder used by the scheduling tests."""
    folder = jobfolders(executable, 0, 8)
    return folder
def root(executable):
    """Build the standard eight-job folder used by the scheduling tests.

    NOTE(review): duplicates an earlier definition of the same name and
    shadows it — consider removing one copy.
    """
    return jobfolders(executable, 0, 8)