Code example #1
File: mpi.py Project: vishalbelsare/pyina
 def _modularize(self, func):
     """pickle.dump function to tempfile"""
     if not self.source:
         # standard pickle.dump of inputs to a NamedTemporaryFile
         return dump(func, suffix='.pik', dir=self.workdir)
     # write func source to a NamedTemporaryFile (instead of pickle.dump)
     # ez*.py requires 'FUNC = <function>' to be included as module.FUNC
     return dump_source(func, alias='FUNC', dir=self.workdir)
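Both branches above return a NamedTemporaryFile handle, which the caller must keep referenced for as long as the tempfile should exist. The round trip below is a minimal sketch of the underlying dill.temp calls (the same API exercised by the test snippets later in this listing), not pyina's own loading code:

# Minimal dill.temp round-trip sketch (illustrative; not pyina's ez*.py loader).
# Note: re-opening a NamedTemporaryFile can fail on Windows (see bpo-14243).
from dill.temp import dump, dump_source, load, load_source

f = lambda x: x**2

# pickle branch: dump the function object, then unpickle it from the tempfile
pikfile = dump(f, suffix='.pik', dir='.')
assert load(pikfile)(4) == 16

# source branch: write 'FUNC = <function>' as importable source, then re-exec it
srcfile = dump_source(f, alias='FUNC', dir='.')
assert load_source(srcfile)(4) == 16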
Code example #2
File: mpi.py Project: vishalbelsare/pyina
 def _pickleargs(self, args, kwds):
     """pickle.dump args and kwds to tempfile"""
     # standard pickle.dump of inputs to a NamedTemporaryFile
     return dump((args, kwds), suffix='.arg', dir=self.workdir)
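The inverse step is just an unpickle of the '.arg' tempfile. A minimal sketch, using only the dill.temp calls shown elsewhere in this listing:

# Illustrative only: recover the (args, kwds) pair written by dump(..., suffix='.arg').
from dill.temp import dump, load

argfile = dump((('a', 'b'), {'onall': True}), suffix='.arg', dir='.')
args, kwds = load(argfile)
assert args == ('a', 'b') and kwds == {'onall': True}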
Code example #3
def func_pickle(func, suffix='.pik', dir='.'):
    """ standard pickle.dump of function to a NamedTemporaryFile """
    from dill.temp import dump
    return dump(func, suffix=suffix, dir=dir)
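Hypothetical usage of func_pickle: the returned NamedTemporaryFile handle must stay referenced for as long as the file should exist (the same caveat the ez_map examples below note), and dill.temp.load reverses the dump:

# Illustrative usage; keep 'handle' alive or the tempfile is removed on collection.
from dill.temp import load

handle = func_pickle(lambda x: x + 1, dir='.')
g = load(handle)
assert g(2) == 3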
Code example #4
File: test_temp.py Project: matsjoyce/dill
def test_pickle_to_tempfile():
    if not WINDOWS:  #see: https://bugs.python.org/issue14243
        dumpfile = dump(x)
        _x = load(dumpfile)
        assert _x == x
Code example #5
File: ez_map.py Project: vishalbelsare/pyina
def ez_map(func, *arglist, **kwds):
    """higher-level map interface for selected mapper and launcher

Maps function 'func' across arguments 'arglist'. Arguments and results
are stored and sent as pickled strings, while function 'func' is inspected
and written as a source file to be imported.

Further Input:
    nodes -- the number of parallel nodes
    launcher -- the launcher object
    scheduler -- the scheduler object
    mapper -- the mapper object
    timelimit -- string representation of maximum run time (e.g. '00:02')
    queue -- string name of selected queue (e.g. 'normal')
    """
    import dill as pickle
    import os.path, tempfile, subprocess
    from pyina.tools import which_strategy
    # mapper = None (allow for use of default mapper)
    if 'mapper' in kwds:
        mapper = kwds['mapper']
        if mapper() == "mpi_pool": scatter = False
        elif mapper() == "mpi_scatter": scatter = True
        else: raise NotImplementedError("Mapper '%s' not found." % mapper())
        ezdefaults['program'] = which_strategy(scatter, lazy=True)
    # override the defaults
    if 'nnodes' in kwds: ezdefaults['nodes'] = kwds['nnodes']
    if 'nodes' in kwds: ezdefaults['nodes'] = kwds['nodes']
    if 'timelimit' in kwds: ezdefaults['timelimit'] = kwds['timelimit']
    if 'queue' in kwds: ezdefaults['queue'] = kwds['queue']
    # set the scheduler & launcher (or use the given default)
    if 'launcher' in kwds: launcher = kwds['launcher']
    else: launcher = mpirun_launcher  #XXX: default = non_mpi?
    if 'scheduler' in kwds: scheduler = kwds['scheduler']
    else: scheduler = ''
    # set scratch directory (most often required for queue launcher)
    if 'workdir' in kwds: ezdefaults['workdir'] = kwds['workdir']
    else:
        if launcher in [torque_launcher, moab_launcher] \
        or scheduler in [torque_scheduler, moab_scheduler]:
            ezdefaults['workdir'] = os.path.expanduser("~")

    from dill.temp import dump, dump_source
    # write func source to a NamedTemporaryFile (instead of pickle.dump)
    # ezrun requires 'FUNC = <function>' to be included as module.FUNC
    modfile = dump_source(func, alias='FUNC', dir=ezdefaults['workdir'])
    # standard pickle.dump of inputs to a NamedTemporaryFile
    kwd = {'onall':kwds.get('onall',True)}
    argfile = dump((arglist,kwd), suffix='.arg', dir=ezdefaults['workdir'])
    # Keep the above return values for as long as you want the tempfile to exist

    resfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
    modname = os.path.splitext(os.path.basename(modfile.name))[0] 
    ezdefaults['progargs'] = ' '.join([modname, argfile.name, resfilename, \
                                       ezdefaults['workdir']])
    #HOLD.append(modfile)
    #HOLD.append(argfile)

    if launcher in [torque_launcher, moab_launcher] \
    or scheduler in [torque_scheduler, moab_scheduler]:
        jobfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        outfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        errfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        ezdefaults['jobfile'] = jobfilename
        ezdefaults['outfile'] = outfilename
        ezdefaults['errfile'] = errfilename

    # get the appropriate launcher for the scheduler
    if scheduler in [torque_scheduler] and launcher in [mpirun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().mpirun
    elif scheduler in [moab_scheduler] and launcher in [mpirun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().mpirun

    elif scheduler in [torque_scheduler] and launcher in [srun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().srun
    elif scheduler in [moab_scheduler] and launcher in [srun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().srun

    elif scheduler in [torque_scheduler] and launcher in [aprun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().aprun
    elif scheduler in [moab_scheduler] and launcher in [aprun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().aprun

    elif scheduler in [torque_scheduler] and launcher in [serial_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().serial
    elif scheduler in [moab_scheduler] and launcher in [serial_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().serial
    #else: scheduler = None

    # counting on the function below to block until done.
    #print 'executing: ', launcher(ezdefaults)
    launch(launcher(ezdefaults)) #FIXME: use subprocessing

    if launcher in [torque_launcher, moab_launcher] \
    or scheduler in [torque_scheduler, moab_scheduler]:
        import time                              #BLOCKING
        while (not os.path.exists(resfilename)): #XXX: or out* to confirm start
            time.sleep(sleeptime) #XXX: wait for results... may infinite loop?
        subprocess.call('rm -f %s' % jobfilename, shell=True)
        subprocess.call('rm -f %s' % outfilename, shell=True)
        subprocess.call('rm -f %s' % errfilename, shell=True)

    # debuggery... output = function(inputs)
   #subprocess.call('cp -f %s modfile.py' % modfile.name, shell=True) # getsource; FUNC=func
   #subprocess.call('cp -f %s argfile.py' % argfile.name, shell=True) # pickled list of inputs
   #subprocess.call('cp -f %s resfile.py' % resfilename, shell=True)  # pickled list of output

    # read result back
    res = pickle.load(open(resfilename,'rb'))
    subprocess.call('rm -f %s' % resfilename, shell=True)
    subprocess.call('rm -f %sc' % modfile.name, shell=True)
    return res
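The four whitespace-joined progargs tokens (module name, argument pickle, result path, workdir) define the contract with the ez*.py worker program. The sketch below is an illustrative consumer of those tokens, not pyina's actual ezpool/ezscatter scripts:

# Illustrative consumer of 'progargs' (NOT pyina's ezpool/ezscatter scripts).
import sys
import dill

def consume(modname, argfilename, resfilename, workdir):
    sys.path.insert(0, workdir)           # the dumped source module lives in workdir
    func = __import__(modname).FUNC       # dump_source wrote 'FUNC = <function>'
    with open(argfilename, 'rb') as f:
        arglist, kwd = dill.load(f)       # matches dump((arglist, kwd), suffix='.arg')
    # (the 'onall' flag in kwd is ignored in this sketch)
    results = list(map(func, *arglist))   # the real scripts spread this over MPI workers
    with open(resfilename, 'wb') as f:    # ez_map polls for and unpickles this file
        dill.dump(results, f)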
Code example #6
f = lambda x: x**2
x = [1, 2, 3, 4, 5]

# source code to tempfile
pyfile = dump_source(f, alias='_f')
_f = load_source(pyfile)
assert _f(4) == f(4)

# source code to stream
pyfile = dumpIO_source(f, alias='_f')
_f = loadIO_source(pyfile)
assert _f(4) == f(4)

# pickle to tempfile
dumpfile = dump(x)
_x = load(dumpfile)
assert _x == x

# pickle to stream
dumpfile = dumpIO(x)
_x = loadIO(dumpfile)
assert _x == x

### now testing the objects ###
f = lambda x: x**2


def g(x):
    return f(x) - x
Code example #7
File: test_temp.py Project: mojaves-forks/dill
# License: 3-clause BSD.  The full license text is available at:
#  - http://trac.mystic.cacr.caltech.edu/project/pathos/browser/dill/LICENSE

from dill.temp import dump, dump_source, dumpIO, dumpIO_source
from dill.temp import load, load_source, loadIO, loadIO_source


f = lambda x: x**2
x = [1,2,3,4,5]

# source code to tempfile
pyfile = dump_source(f, alias='_f')
_f = load_source(pyfile)
assert _f(4) == f(4)

# source code to stream
pyfile = dumpIO_source(f, alias='_f')
_f = loadIO_source(pyfile)
assert _f(4) == f(4)

# pickle to tempfile
dumpfile = dump(x)
_x = load(dumpfile)
assert _x == x

# pickle to stream
dumpfile = dumpIO(x)
_x = loadIO(dumpfile)
assert _x == x

Code example #8
File: ez_map.py Project: uqfoundation/pyina
def ez_map(func, *arglist, **kwds):
    """higher-level map interface for selected mapper and launcher

Maps function 'func' across arguments 'arglist'. Arguments and results
are stored and sent as pickled strings, while function 'func' is inspected
and written as a source file to be imported.

Further Input:
    nodes -- the number of parallel nodes
    launcher -- the launcher object
    scheduler -- the scheduler object
    mapper -- the mapper object
    timelimit -- string representation of maximum run time (e.g. '00:02')
    queue -- string name of selected queue (e.g. 'normal')
    """
    import dill as pickle
    import os.path, tempfile, subprocess
    from pyina.tools import which_strategy
    # mapper = None (allow for use of default mapper)
    if 'mapper' in kwds:
        mapper = kwds['mapper']
        if mapper() == "mpi_pool": scatter = False
        elif mapper() == "mpi_scatter": scatter = True
        else: raise NotImplementedError("Mapper '%s' not found." % mapper())
        ezdefaults['program'] = which_strategy(scatter, lazy=True)
    # override the defaults
    if 'nnodes' in kwds: ezdefaults['nodes'] = kwds['nnodes']
    if 'nodes' in kwds: ezdefaults['nodes'] = kwds['nodes']
    if 'timelimit' in kwds: ezdefaults['timelimit'] = kwds['timelimit']
    if 'queue' in kwds: ezdefaults['queue'] = kwds['queue']
    # set the scheduler & launcher (or use the given default)
    if 'launcher' in kwds: launcher = kwds['launcher']
    else: launcher = mpirun_launcher  #XXX: default = non_mpi?
    if 'scheduler' in kwds: scheduler = kwds['scheduler']
    else: scheduler = ''
    # set scratch directory (most often required for queue launcher)
    if 'workdir' in kwds: ezdefaults['workdir'] = kwds['workdir']
    else:
        if launcher in [torque_launcher, moab_launcher] \
        or scheduler in [torque_scheduler, moab_scheduler]:
            ezdefaults['workdir'] = os.path.expanduser("~")

    from dill.temp import dump, dump_source
    # write func source to a NamedTemporaryFile (instead of pickle.dump)
    # ezrun requires 'FUNC = <function>' to be included as module.FUNC
    modfile = dump_source(func, alias='FUNC', dir=ezdefaults['workdir'])
    # standard pickle.dump of inputs to a NamedTemporaryFile
    kwd = {'onall':kwds.get('onall',True)}
    argfile = dump((arglist,kwd), suffix='.arg', dir=ezdefaults['workdir'])
    # Keep the above return values for as long as you want the tempfile to exist

    resfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
    modname = os.path.splitext(os.path.basename(modfile.name))[0] 
    ezdefaults['progargs'] = ' '.join([modname, argfile.name, resfilename, \
                                       ezdefaults['workdir']])
    #HOLD.append(modfile)
    #HOLD.append(argfile)

    if launcher in [torque_launcher, moab_launcher] \
    or scheduler in [torque_scheduler, moab_scheduler]:
        jobfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        outfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        errfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        ezdefaults['jobfile'] = jobfilename
        ezdefaults['outfile'] = outfilename
        ezdefaults['errfile'] = errfilename

    # get the appropriate launcher for the scheduler
    if scheduler in [torque_scheduler] and launcher in [mpirun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().mpirun
    elif scheduler in [moab_scheduler] and launcher in [mpirun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().mpirun

    elif scheduler in [torque_scheduler] and launcher in [srun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().srun
    elif scheduler in [moab_scheduler] and launcher in [srun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().srun

    elif scheduler in [torque_scheduler] and launcher in [aprun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().aprun
    elif scheduler in [moab_scheduler] and launcher in [aprun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().aprun

    elif scheduler in [torque_scheduler] and launcher in [serial_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().serial
    elif scheduler in [moab_scheduler] and launcher in [serial_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().serial
    #else: scheduler = None

    # counting on the function below to block until done.
    #print 'executing: ', launcher(ezdefaults)
    launch(launcher(ezdefaults)) #FIXME: use subprocessing

    if launcher in [torque_launcher, moab_launcher] \
    or scheduler in [torque_scheduler, moab_scheduler]:
        import time                              #BLOCKING
        while (not os.path.exists(resfilename)): #XXX: or out* to confirm start
            time.sleep(sleeptime) #XXX: wait for results... may infinite loop?
        subprocess.call('rm -f %s' % jobfilename, shell=True)
        subprocess.call('rm -f %s' % outfilename, shell=True)
        subprocess.call('rm -f %s' % errfilename, shell=True)

    # debuggery... output = function(inputs)
   #subprocess.call('cp -f %s modfile.py' % modfile.name, shell=True) # getsource; FUNC=func
   #subprocess.call('cp -f %s argfile.py' % argfile.name, shell=True) # pickled list of inputs
   #subprocess.call('cp -f %s resfile.py' % resfilename, shell=True)  # pickled list of output

    # read result back
    res = pickle.load(open(resfilename,'rb'))
    subprocess.call('rm -f %s' % resfilename, shell=True)
    subprocess.call('rm -f %sc' % modfile.name, shell=True)
    return res
Code example #9
File: ez_map.py Project: hpparvi/pyina
def ez_map2(func, *arglist, **kwds):
    """higher-level map interface for selected mapper and launcher

Maps function 'func' across arguments 'arglist'. Arguments and results
are stored and sent as pickled strings, and the function 'func' is also
stored and sent as a pickled string. This differs from 'ez_map' in that
the function is pickled rather than written to a temporary source file
to be imported.

Further Input:
    nodes -- the number of parallel nodes
    launcher -- the launcher object
    scheduler -- the scheduler object
    mapper -- the mapper object
    timelimit -- string representation of maximum run time (e.g. '00:02')
    queue -- string name of selected queue (e.g. 'normal')
"""
    import dill as pickle
    import os.path, tempfile, subprocess
    from pyina.tools import which_strategy
    # mapper = None (allow for use of default mapper)
    if 'mapper' in kwds:
        mapper = kwds['mapper']
        if mapper() == "mpi_pool": scatter = False
        elif mapper() == "mpi_scatter": scatter = True
        else: raise NotImplementedError("Mapper '%s' not found." % mapper())
        ezdefaults['program'] = which_strategy(scatter, lazy=True)
    # override the defaults
    if 'nnodes' in kwds: ezdefaults['nodes'] = kwds['nnodes']
    if 'nodes' in kwds: ezdefaults['nodes'] = kwds['nodes']
    if 'timelimit' in kwds: ezdefaults['timelimit'] = kwds['timelimit']
    if 'queue' in kwds: ezdefaults['queue'] = kwds['queue']
    # set the scheduler & launcher (or use the given default)
    if 'launcher' in kwds: launcher = kwds['launcher']
    else: launcher = mpirun_launcher  #XXX: default = non_mpi?
    if 'scheduler' in kwds: scheduler = kwds['scheduler']
    else: scheduler = ''
    # set scratch directory (most often required for queue launcher)
    if 'workdir' in kwds: ezdefaults['workdir'] = kwds['workdir']
    else:
        if launcher in [torque_launcher, moab_launcher] \
        or scheduler in [torque_scheduler, moab_scheduler]:
            ezdefaults['workdir'] = os.path.expanduser("~")

    from dill.temp import dump
    # standard pickle.dump of inputs to a NamedTemporaryFile
    modfile = dump(func, suffix='.pik', dir=ezdefaults['workdir'])
    kwd = {'onall':kwds.get('onall',True)}
    argfile = dump((arglist,kwd), suffix='.arg', dir=ezdefaults['workdir'])
    # Keep the above return values for as long as you want the tempfile to exist

    resfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
    ezdefaults['progargs'] = ' '.join([modfile.name,argfile.name,resfilename, \
                                       ezdefaults['workdir']])
    #HOLD.append(modfile)
    #HOLD.append(argfile)

    if launcher in [torque_launcher, moab_launcher] \
    or scheduler in [torque_scheduler, moab_scheduler]:
        jobfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        outfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        errfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        ezdefaults['jobfile'] = jobfilename
        ezdefaults['outfile'] = outfilename
        ezdefaults['errfile'] = errfilename

    # get the appropriate launcher for the scheduler
    if scheduler in [torque_scheduler] and launcher in [mpirun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().mpirun
    elif scheduler in [moab_scheduler] and launcher in [mpirun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().mpirun

    elif scheduler in [torque_scheduler] and launcher in [srun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().srun
    elif scheduler in [moab_scheduler] and launcher in [srun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().srun

    elif scheduler in [torque_scheduler] and launcher in [aprun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().aprun
    elif scheduler in [moab_scheduler] and launcher in [aprun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().aprun

    elif scheduler in [torque_scheduler] and launcher in [serial_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().serial
    elif scheduler in [moab_scheduler] and launcher in [serial_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().serial
    #else: scheduler = None

    # counting on the function below to block until done.
    #print 'executing: ', launcher(ezdefaults)
    launch(launcher(ezdefaults)) #FIXME: use subprocessing

    if launcher in [torque_launcher, moab_launcher] \
    or scheduler in [torque_scheduler, moab_scheduler]:
        import time                              #BLOCKING
        while (not os.path.exists(resfilename)): #XXX: or out* to confirm start
            time.sleep(sleeptime) #XXX: wait for results... may infinite loop?
        subprocess.call('rm -f %s' % jobfilename, shell=True)
        subprocess.call('rm -f %s' % outfilename, shell=True)
        subprocess.call('rm -f %s' % errfilename, shell=True)

    # read result back
    res = pickle.load(open(resfilename,'rb'))
    subprocess.call('rm -f %s' % resfilename, shell=True)
    return res
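Because ez_map2 pickles the function instead of writing its source, the hypothetical worker-side step sketched after ez_map above changes from an import to an unpickle of the '.pik' path passed as the first progargs token (illustrative only):

# Illustrative only: worker-side recovery of the function pickled by ez_map2.
import dill

def load_pickled_func(pikfilename):
    with open(pikfilename, 'rb') as f:
        return dill.load(f)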