Example #1
def _modularize(self, func):
    """pickle.dump function to tempfile"""
    from dill.temp import dump, dump_source
    if not self.source:
        # standard pickle.dump of inputs to a NamedTemporaryFile
        return dump(func, suffix='.pik', dir=self.workdir)
    # write func source to a NamedTemporaryFile (instead of pickle.dump)
    # ez*.py requires 'FUNC = <function>' to be included as module.FUNC
    return dump_source(func, alias='FUNC', dir=self.workdir)
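Both branches map directly onto dill.temp's file-based API. A minimal round-trip sketch of the two paths, outside the class (the function and workdir here are illustrative; on Windows the source path is subject to the NamedTemporaryFile caveat noted in the tests below):

import tempfile
from dill.temp import dump, dump_source, load, load_source

def func(x):
    return x + 1

workdir = tempfile.mkdtemp()
# pickle path: dill serializes the function object itself
pik = dump(func, suffix='.pik', dir=workdir)
assert load(pik)(3) == 4
# source path: the function's source is written out with a 'FUNC' alias
mod = dump_source(func, alias='FUNC', dir=workdir)
assert load_source(mod)(3) == 4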
Example #4
def test_code_to_tempfile():
    if not WINDOWS:  #see: https://bugs.python.org/issue14243
        pyfile = dump_source(f, alias='_f')
        _f = load_source(pyfile)
        assert _f(4) == f(4)
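The test leans on module-level fixtures; a self-contained variant for reference, where WINDOWS and f are assumptions mirroring what the test module presumably defines (the guard exists because a still-open NamedTemporaryFile cannot be reopened on Windows, per the linked issue):

import sys
from dill.temp import dump_source, load_source

WINDOWS = sys.platform.startswith('win')  # assumed fixture
f = lambda x: x**2                        # assumed fixture

if not WINDOWS:  # see: https://bugs.python.org/issue14243
    _f = load_source(dump_source(f, alias='_f'))
    assert _f(4) == f(4)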
Example #5
def ez_map(func, *arglist, **kwds):
    """higher-level map interface for selected mapper and launcher

maps function 'func' across arguments 'arglist'.  arguments and results
are stored and sent as pickled strings, while function 'func' is inspected
and written as a source file to be imported.

Further Input:
    nodes -- the number of parallel nodes
    launcher -- the launcher object
    scheduler -- the scheduler object
    mapper -- the mapper object
    timelimit -- string representation of maximum run time (e.g. '00:02')
    queue -- string name of selected queue (e.g. 'normal')
    """
    import dill as pickle
    import os.path, tempfile, subprocess
    from pyina.tools import which_strategy
    # mapper = None (allow for use of default mapper)
    if 'mapper' in kwds:
        mapper = kwds['mapper']
        if mapper() == "mpi_pool": scatter = False
        elif mapper() == "mpi_scatter": scatter = True
        else: raise NotImplementedError("Mapper '%s' not found." % mapper())
        ezdefaults['program'] = which_strategy(scatter, lazy=True)
    # override the defaults
    if 'nnodes' in kwds: ezdefaults['nodes'] = kwds['nnodes']
    if 'nodes' in kwds: ezdefaults['nodes'] = kwds['nodes']
    if 'timelimit' in kwds: ezdefaults['timelimit'] = kwds['timelimit']
    if 'queue' in kwds: ezdefaults['queue'] = kwds['queue']
    # set the scheduler & launcher (or use the given default)
    if 'launcher' in kwds: launcher = kwds['launcher']
    else: launcher = mpirun_launcher  #XXX: default = non_mpi?
    if 'scheduler' in kwds: scheduler = kwds['scheduler']
    else: scheduler = ''
    # set scratch directory (most often required for queue launcher)
    if 'workdir' in kwds: ezdefaults['workdir'] = kwds['workdir']
    else:
        if launcher in [torque_launcher, moab_launcher] \
        or scheduler in [torque_scheduler, moab_scheduler]:
            ezdefaults['workdir'] = os.path.expanduser("~")

    from dill.temp import dump, dump_source
    # write func source to a NamedTemporaryFile (instead of pickle.dump)
    # ezrun requires 'FUNC = <function>' to be included as module.FUNC
    modfile = dump_source(func, alias='FUNC', dir=ezdefaults['workdir'])
    # standard pickle.dump of inputs to a NamedTemporaryFile
    kwd = {'onall':kwds.get('onall',True)}
    argfile = dump((arglist,kwd), suffix='.arg', dir=ezdefaults['workdir'])
    # Keep the above return values for as long as you want the tempfile to exist

    resfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
    modname = os.path.splitext(os.path.basename(modfile.name))[0] 
    ezdefaults['progargs'] = ' '.join([modname, argfile.name, resfilename, \
                                       ezdefaults['workdir']])
    #HOLD.append(modfile)
    #HOLD.append(argfile)

    if launcher in [torque_launcher, moab_launcher] \
    or scheduler in [torque_scheduler, moab_scheduler]:
        jobfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        outfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        errfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        ezdefaults['jobfile'] = jobfilename
        ezdefaults['outfile'] = outfilename
        ezdefaults['errfile'] = errfilename

    # get the appropriate launcher for the scheduler
    if scheduler in [torque_scheduler] and launcher in [mpirun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().mpirun
    elif scheduler in [moab_scheduler] and launcher in [mpirun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().mpirun

    elif scheduler in [torque_scheduler] and launcher in [srun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().srun
    elif scheduler in [moab_scheduler] and launcher in [srun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().srun

    elif scheduler in [torque_scheduler] and launcher in [aprun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().aprun
    elif scheduler in [moab_scheduler] and launcher in [aprun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().aprun

    elif scheduler in [torque_scheduler] and launcher in [serial_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().serial
    elif scheduler in [moab_scheduler] and launcher in [serial_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().serial
    #else: scheduler = None

    # counting on the function below to block until done.
    #print 'executing: ', launcher(ezdefaults)
    launch(launcher(ezdefaults)) #FIXME: use subprocessing

    if launcher in [torque_launcher, moab_launcher] \
    or scheduler in [torque_scheduler, moab_scheduler]:
        import time                              #BLOCKING
        while (not os.path.exists(resfilename)): #XXX: or out* to confirm start
            time.sleep(sleeptime) #XXX: wait for results... may infinite loop?
        subprocess.call('rm -f %s' % jobfilename, shell=True)
        subprocess.call('rm -f %s' % outfilename, shell=True)
        subprocess.call('rm -f %s' % errfilename, shell=True)

    # debuggery... output = function(inputs)
    #subprocess.call('cp -f %s modfile.py' % modfile.name, shell=True) # getsource; FUNC=func
    #subprocess.call('cp -f %s argfile.py' % argfile.name, shell=True) # pickled list of inputs
    #subprocess.call('cp -f %s resfile.py' % resfilename, shell=True)  # pickled list of output

    # read result back
    with open(resfilename, 'rb') as resfile:
        res = pickle.load(resfile)
    subprocess.call('rm -f %s' % resfilename, shell=True)
    subprocess.call('rm -f %sc' % modfile.name, shell=True)
    return res
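A usage sketch for the map above, assuming it is exposed as pyina.ez_map.ez_map (as in older pyina releases) and that an MPI stack with mpirun is available; the worker function and node count are illustrative:

from pyina.ez_map import ez_map  # assumed import path

def task(i):
    return i**2

# blocks until the launched MPI job finishes, then unpickles the results
results = ez_map(task, range(4), nodes=4)
print(results)  # e.g. [0, 1, 4, 9]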
Example #6
def run_files(obj):
    f = temp.dump_source(obj, alias='_obj')
    _obj = temp.load_source(f)
    assert _obj(1.57) == obj(1.57)
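A sketch of driving the helper above; temp is assumed to be dill.temp bound at module scope, and the callable's source must be recoverable (true for lambdas and ordinary defs):

from dill import temp  # assumed module-level binding

run_files(lambda x: 2 * x)   # lambdas round-trip via their extracted source

def poly(x):
    return x**2 - 1

run_files(poly)              # as do ordinary named functions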
Example #7
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2008-2015 California Institute of Technology.
# License: 3-clause BSD.  The full license text is available at:
#  - http://trac.mystic.cacr.caltech.edu/project/pathos/browser/dill/LICENSE

from dill.temp import dump, dump_source, dumpIO, dumpIO_source
from dill.temp import load, load_source, loadIO, loadIO_source

f = lambda x: x**2
x = [1, 2, 3, 4, 5]

# source code to tempfile
pyfile = dump_source(f, alias='_f')
_f = load_source(pyfile)
assert _f(4) == f(4)

# source code to stream
pyfile = dumpIO_source(f, alias='_f')
_f = loadIO_source(pyfile)
assert _f(4) == f(4)

# pickle to tempfile
dumpfile = dump(x)
_x = load(dumpfile)
assert _x == x

# pickle to stream
dumpfile = dumpIO(x)
_x = loadIO(dumpfile)
assert _x == x
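Unlike dump and dump_source, the dumpIO variants write to an in-memory buffer rather than a tempfile; a sketch of inspecting one directly (assuming dumpIO returns a BytesIO-like object, as in recent dill releases):

from dill.temp import dumpIO, loadIO

buf = dumpIO({'a': 1})
raw = buf.getvalue()         # the raw pickled bytes
assert loadIO(buf) == {'a': 1}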
Example #10
from dill.temp import dump, dump_source, dumpIO, dumpIO_source
from dill.temp import load, load_source, loadIO, loadIO_source


f = lambda x: x ** 2
x = [1, 2, 3, 4, 5]

# source code to tempfile
pyfile = dump_source(f, alias="_f")
_f = load_source(pyfile)
assert _f(4) == f(4)

# source code to stream
pyfile = dumpIO_source(f, alias="_f")
_f = loadIO_source(pyfile)
assert _f(4) == f(4)

# pickle to tempfile
dumpfile = dump(x)
_x = load(dumpfile)
assert _x == x

# pickle to stream
dumpfile = dumpIO(x)
_x = loadIO(dumpfile)
assert _x == x
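One caveat, echoed by the "keep the above return values" comment in ez_map: the NamedTemporaryFile returned by dump and dump_source is cleaned up once the last reference to it is dropped, so hold on to the return value for as long as the file must exist. A sketch, assuming CPython's prompt refcount-based finalization:

import os
from dill.temp import dump

tmp = dump([1, 2, 3])
path = tmp.name
assert os.path.exists(path)
del tmp                      # last reference dropped: the tempfile is removed
assert not os.path.exists(path)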
Example #11
def test_files(obj):
    f = temp.dump_source(obj, alias='_obj')
    _obj = temp.load_source(f)
    assert _obj(1.57) == obj(1.57)