def start_remote_fit(problem, options, queue, notify):
    """
    Queue remote fit.

    Serialize *problem* and *options* and submit them as a 'fitter' job
    to the job server at *queue*.  *notify* is the address to notify when
    the job completes.  Returns the job record from the server.

    Uses dill when available so that problems containing lambdas or
    dynamically defined classes can be serialized; falls back to the
    standard pickle otherwise.
    """
    from jobqueue.client import connect
    try:
        from dill import dumps as dill_dumps

        # recurse=True pulls in the globals referenced by the problem so
        # it can be unpickled on the remote side.
        def dumps(obj):
            return dill_dumps(obj, recurse=True)
    except ImportError:
        from pickle import dumps
    data = dict(package='bumps',
                version=__version__,
                problem=dumps(problem),
                options=dumps(options))
    request = dict(
        service='fitter',
        version=__version__,  # fitter service version
        notify=notify,
        name=problem.title,
        data=data)
    server = connect(queue)
    job = server.submit(request)
    return job
def serve(dispatcher, queue):
    """
    Run the work server.

    Poll *dispatcher* for jobs on *queue*, run each job in a subprocess,
    and push the results back from a background thread so the next job
    can start while the (possibly large) results upload.  Loops forever.
    """
    assert queue is not None
    next_request = {'request': None}
    remote = connect(dispatcher)
    while True:
        if not next_request['request']:
            try:
                next_request = remote.nextjob(queue=queue)
            # BUG FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; only trap real errors so the
            # server can still be stopped.
            except Exception:
                logging.error(traceback.format_exc())
        if next_request['request']:
            jobid = next_request['id']
            if jobid is None:
                logging.error('request has no job id')
                next_request = {'request': None}
                continue
            logging.info('processing job %s' % jobid)
            process = Process(target=runjob.run,
                              args=(jobid, next_request['request']))
            process.start()
            results, next_request = wait_for_result(remote, jobid, process, queue)
            # Post results from a thread so the main loop can start the
            # next job while megabytes of results transfer in background.
            thread.start_new_thread(update_remote,
                                    (dispatcher, jobid, queue, results))
        else:
            time.sleep(POLLRATE)
def serve(dispatcher, queue):
    """
    Run the work server.

    Poll *dispatcher* for jobs on *queue*, run each job in a subprocess,
    and push the results back from a background thread so the next job
    can start while the (possibly large) results upload.  Loops forever.
    """
    assert queue is not None
    next_request = {'request': None}
    remote = connect(dispatcher)
    while True:
        if not next_request['request']:
            try:
                next_request = remote.nextjob(queue=queue)
            # BUG FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; only trap real errors so the
            # server can still be stopped.
            except Exception:
                logging.error(traceback.format_exc())
        if next_request['request']:
            jobid = next_request['id']
            if jobid is None:
                logging.error('request has no job id')
                next_request = {'request': None}
                continue
            logging.info('processing job %s' % jobid)
            process = Process(target=runjob.run,
                              args=(jobid, next_request['request']))
            process.start()
            results, next_request = wait_for_result(remote, jobid, process, queue)
            # Post results from a thread so the main loop can start the
            # next job while the results transfer in the background.
            start_new_thread(update_remote,
                             (dispatcher, jobid, queue, results))
        else:
            time.sleep(POLLRATE)
def start_remote_fit(problem, options, queue, notify):
    """
    Queue remote fit.

    Pickle *problem* and *options* and submit them as a 'fitter' job to
    the job server at *queue*; *notify* is the address notified when the
    job completes.  Returns the job record from the server.
    """
    from jobqueue.client import connect
    data = dict(package="bumps",
                version=__version__,
                problem=pickle.dumps(problem),
                options=pickle.dumps(options))
    request = dict(
        service="fitter",
        version=__version__,  # fitter service version
        notify=notify,
        name=problem.title,
        data=data
    )
    server = connect(queue)
    job = server.submit(request)
    return job
def update_remote(dispatcher, id, queue, results):
    """
    Update remote server with results.

    Push *results* and every file stored under job *id* back to
    *dispatcher*, then remove the local working directory.
    """
    workdir = store.path(id)
    # Drop any stale results key before posting fresh results.
    try:
        store.delete(id, 'results')
    except KeyError:
        pass
    files = [os.path.join(workdir, name) for name in os.listdir(workdir)]
    # This is done with a separate connection to the server so that it
    # can run inside a thread.  That way the server can start the next
    # job while the megabytes of results transfer in the background.
    connection = connect(dispatcher)
    connection.postjob(id=id, results=results, queue=queue, files=files)
    # Local copies are no longer needed once the server has them.
    for filename in files:
        os.unlink(filename)
    os.rmdir(workdir)
def start_remote_fit(problem, options, queue, notify):
    """
    Queue remote fit.

    Serialize *problem* and *options* with pickle and submit them as a
    'fitter' job to the server at *queue*, with completion notices sent
    to *notify*.  Returns the job record from the server.
    """
    from jobqueue.client import connect

    payload = {
        'package': 'bumps',
        'version': __version__,
        'problem': pickle.dumps(problem),
        'options': pickle.dumps(options),
        }
    request = {
        'service': 'fitter',
        'version': __version__,  # fitter service version
        'notify': notify,
        'name': problem.title,
        'data': payload,
        }
    server = connect(queue)
    return server.submit(request)
def update_remote(dispatcher, id, queue, results):
    """
    Update remote server with results.

    Send *results* plus all files in the job's store directory to
    *dispatcher*, then delete the local directory and its contents.
    """
    path = store.path(id)
    # Clear any existing results entry; absence is not an error.
    try:
        store.delete(id, 'results')
    except KeyError:
        pass
    files = []
    for entry in os.listdir(path):
        files.append(os.path.join(path, entry))
    # A dedicated connection lets this run in a background thread while
    # the server moves on to the next job during the (possibly slow)
    # transfer of the results.
    remote = connect(dispatcher)
    remote.postjob(id=id, results=results, queue=queue, files=files)
    # Remove the transferred files and the now-empty directory.
    for f in files:
        os.unlink(f)
    os.rmdir(path)
from __future__ import print_function
from jobqueue.client import connect

DEBUG = True

#server = connect('http://reflectometry.org/queue')
server = connect('http://localhost:5000')


def checkqueue(pending=[], active=[], complete=[]):
    """
    Print the queue contents by status.

    The *pending*/*active*/*complete* arguments are the expected job
    lists for the (currently disabled) assertions; the defaults are
    never mutated, so the mutable-default lists are harmless here.
    """
    qpending = server.jobs('PENDING')
    qactive = server.jobs('ACTIVE')
    qcomplete = server.jobs('COMPLETE')
    if DEBUG:
        print("pending", qpending, "active", qactive, "complete", qcomplete)
    #assert pending == qpending
    #assert active == qactive
    #assert complete == qcomplete


# Sample requests: one slow job, one fast job, and two that should fail
# (bad data type, unknown service).
long = {'service': 'count', 'data': 1000000,
        'name': 'long count', 'notify': 'me'}
short = {'service': 'count', 'data': 200,
         'name': 'short count', 'notify': 'me'}
fail1 = {'service': 'count', 'data': 'string',
         'name': 'short count', 'notify': 'me'}
fail2 = {'service': 'noservice', 'data': 'string',
         'name': 'short count', 'notify': 'me'}

# BUG FIX: previously submitted the builtin type ``int`` instead of a
# request dict; submit the prepared long-running request.
job = server.submit(long)
print("submit", job)
#import sys; sys.exit()
checkqueue()
from __future__ import print_function from jobqueue.client import connect DEBUG = True #server = connect('http://reflectometry.org/queue') server = connect('http://localhost:5000') def checkqueue(pending=[], active=[], complete=[]): qpending = server.jobs('PENDING') qactive = server.jobs('ACTIVE') qcomplete = server.jobs('COMPLETE') if DEBUG: print("pending", qpending, "active", qactive, "complete", qcomplete) #assert pending == qpending #assert active == qactive #assert complete == qcomplete long = { 'service': 'count', 'data': 1000000, 'name': 'long count', 'notify': 'me' } short = { 'service': 'count', 'data': 200, 'name': 'short count',