def createWorkQueue(wq_port, debug=True):
    """Create the module-global Work Queue master for the 'forcebalance' project.

    Parameters
    ----------
    wq_port : int
        Port the Work Queue master listens on.
    debug : bool, optional
        When True (the default, preserving the original behavior), enable
        verbose cctools debugging output via set_debug_flag('all').

    Side effects: assigns the global WORK_QUEUE.
    """
    global WORK_QUEUE
    if debug:
        work_queue.set_debug_flag('all')
    WORK_QUEUE = work_queue.WorkQueue(port=wq_port, catalog=True,
                                      exclusive=False, shutdown=False)
    WORK_QUEUE.specify_name('forcebalance')
    # 8640000 s is ~100 days: effectively disables keepalive churn.
    WORK_QUEUE.specify_keepalive_timeout(8640000)
    WORK_QUEUE.specify_keepalive_interval(8640000)
def run_quantum():
    # Python 2 helper: generate an ESP grid per snapshot of the molecule M,
    # write a Q-Chem input in a per-snapshot directory, queue each job on a
    # Work Queue master, then wait for all of them to finish.
    # Relies on module globals: M, wq_port, formstr, bohrang, queue_up, wq_wait.
    ESP = create_esp_surfaces(M)
    work_queue.set_debug_flag('all')
    wq = work_queue.WorkQueue(wq_port, exclusive=False, shutdown=False)
    wq.specify_name('forcebalance')
    os.makedirs('calcs')
    os.chdir('calcs')
    for i in range(M.ns):
        # NOTE(review): eval on a format-string result — presumably formstr
        # yields a quoted directory name; confirm formstr is trusted input.
        dnm = eval(formstr % i)
        os.makedirs(dnm)
        os.chdir(dnm)
        # Tell Q-Chem how many ESP grid points to expect.
        M.edit_qcrems({'igdesp': len(ESP[i])})
        M.write("qchem.in", select=i)
        # Grid coordinates converted from Angstrom to Bohr.
        ESPBohr = np.array(ESP[i]) / bohrang
        np.savetxt('ESPGrid', ESPBohr)
        print "Queueing up job", dnm
        queue_up(wq, command='qchem40 qchem.in qchem.out', input_files=["qchem.in", "ESPGrid"], output_files=["qchem.out", "plot.esp", "efield.dat"], verbose=False)
        os.chdir('..')
    # One wq_wait per submitted job; each call presumably blocks until a
    # task returns — TODO confirm wq_wait semantics.
    for i in range(M.ns):
        wq_wait(wq)
    os.chdir('..')
def __call__(self):
    """Build and return a configured WorkQueue, optionally wrapped for
    task replication."""
    # Debugging
    if self._debug is not None:
        ccl.set_debug_flag(self._debug)
    # Vanilla WorkQueue construction
    settings = {
        'port': self._port,
        'catalog': self._catalog,
        'exclusive': self._exclusive,
        'shutdown': self._shutdown,
    }
    if self._name is not None:
        settings['name'] = self._name
    queue = ccl.WorkQueue(**settings)
    queue.activate_fast_abort(self._fast_abort)
    if self._logfile is not None:
        queue.specify_log(self._logfile)
    # Task replication wrapper, when requested
    if self._replicate is not None:
        queue = _wq.replication.WorkQueue(queue, maxreplicas=self._replicate)
    return queue
def createWorkQueue(wq_port, debug=True):
    """Create the module-global 'forcebalance' Work Queue master.

    wq_port -- port the master listens on
    debug   -- when True, enable verbose cctools debugging output
    """
    global WORK_QUEUE
    if debug:
        work_queue.set_debug_flag('all')
    WORK_QUEUE = work_queue.WorkQueue(
        port=wq_port,
        catalog=True,
        exclusive=False,
        shutdown=False,
    )
    WORK_QUEUE.specify_name('forcebalance')
    # Keepalive timeout deliberately left at the library default
    # (was: specify_keepalive_timeout(8640000)); only the interval is
    # stretched to 8640000 s (~100 days).
    WORK_QUEUE.specify_keepalive_interval(8640000)
def __init__(self, port):
    """Create a Work Queue master named 'dihedral' listening on *port*."""
    work_queue.set_debug_flag('all')
    queue = work_queue.WorkQueue(port=port, exclusive=False, shutdown=False)
    # Counter for tasks that fail at the application level.
    queue.tasks_failed = 0
    queue.specify_keepalive_interval(8640000)
    queue.specify_name('dihedral')
    print('Work Queue listening on %d' % queue.port)
    self.wq = queue
def createWorkQueue(wq_port, debug=True):
    """Build the global Work Queue master used to farm out calculations.

    Registers with the catalog server under the project name
    'forcebalance'.  A 'tasks_failed' attribute is attached to the queue
    as a counter for tasks that fail at the application level.
    """
    global WORK_QUEUE
    if debug:
        work_queue.set_debug_flag('all')
    WORK_QUEUE = work_queue.WorkQueue(port=wq_port,
                                      catalog=True,
                                      exclusive=False,
                                      shutdown=False)
    WORK_QUEUE.tasks_failed = 0
    WORK_QUEUE.specify_name('forcebalance')
    # Keepalive timeout stays at the library default; the interval is
    # stretched to 8640000 s (~100 days).
    WORK_QUEUE.specify_keepalive_interval(8640000)
def __init__(self, port, name='dihedral'):
    """Wrap a Work Queue master listening on *port* under project *name*."""
    work_queue.set_debug_flag('all')
    queue = work_queue.WorkQueue(port=port, exclusive=False, shutdown=False)
    queue.specify_keepalive_interval(8640000)
    queue.specify_name(name)
    self.wq = queue
    # Our own counter for tasks that failed (application level).
    self.tasks_failed = 0
    self.queue_status = None
    self.last_print_time = 0
    print('Work Queue listening on %d' % (queue.port), file=sys.stderr)
def __init__(self, port, name='dihedral'):
    """Start a Work Queue master on *port*, advertised as project *name*."""
    # Bookkeeping first; these do not depend on the queue object.
    self.tasks_failed = 0       # our own counter for tasks that failed
    self.queue_status = None
    self.last_print_time = 0
    work_queue.set_debug_flag('all')
    self.wq = work_queue.WorkQueue(port=port)
    self.wq.specify_keepalive_interval(8640000)
    self.wq.specify_name(name)
    print('Work Queue listening on %d' % self.wq.port, file=sys.stderr)
def __init__(self, config, notifier):
    """Initialize task bookkeeping from a configuration mapping.

    Parameters
    ----------
    config : mapping
        Must provide 'project', 'catalog_server', 'catalog_port', 'port',
        'log' and 'debug'.  The two port values must be convertible with
        int().
    notifier : object
        Stored on the instance for later use.

    Exits the process (exit(1)) after logging when the configuration is
    missing a key or a port value is not numeric.
    """
    self.tasks = {}
    self.notifier = notifier
    try:
        self.project = config['project']
        self.catalog_server = config['catalog_server']
        self.catalog_port = int(config['catalog_port'])
        self.port = int(config['port'])
        self.log = config['log']
        if config['debug']:
            wq.set_debug_flag('all')
    except (KeyError, ValueError):
        # ValueError covers non-numeric port strings, which are just as
        # invalid as a missing key and previously escaped this handler.
        logger.exception("Invalid workqueue configuration")
        exit(1)
def _mk_wq(self):
    """Return the process-wide cctools WorkQueue singleton, creating it on
    first use.

    Returns:
        The cctools WorkQueue singleton object.
    """
    global _AWE_WORK_QUEUE
    if _AWE_WORK_QUEUE is not None:
        # Reuse path: only warn; the stats log is still re-attached below.
        awe.log('WARNING: using previously created WorkQueue instance')
    else:
        if self.debug:
            WQ.set_debug_flag(self.debug)
            if self.wq_logfile:
                # Route cctools' own debug stream to a file.
                awe.util.makedirs_parent(self.wq_logfile)
                WQ.cctools_debug_config_file(self.wq_logfile)
                # size 0 — presumably means unlimited; confirm with cctools docs.
                WQ.cctools_debug_config_file_size(0)
        if self.name:
            # A project name implies registration with the catalog server.
            self.catalog = True
        wq = WQ.WorkQueue(name = self.name, port = self.port, shutdown = self.shutdown, catalog = self.catalog, exclusive = self.exclusive)
        # Task scheduling policy.
        wq.specify_algorithm(self.schedule)
        if self.monitor:
            wq.enable_monitoring(self.summaryfile)
        if self.capacity:
            wq.estimate_capacity()
        awe.log('Running on port %d...' % wq.port)
        if wq.name:
            awe.log('Using project name %s' % wq.name)
        if self.debug and self.wq_logfile:
            awe.log('Logging WorkQueue to %s' % self.wq_logfile)
        # Fast abort only when a numeric multiplier was configured.
        typ = type(self.fastabort)
        if typ is float or typ is int:
            wq.activate_fast_abort(self.fastabort)
        _AWE_WORK_QUEUE = wq
    # (Re)attach the stats log on every call, including the reuse path.
    awe.util.makedirs_parent(self.wqstats_logfile)
    _AWE_WORK_QUEUE.specify_log(self.wqstats_logfile)
    return _AWE_WORK_QUEUE
def _mk_wq(self):
    """Return the module-level WorkQueue singleton, building it on first use."""
    global _AWE_WORK_QUEUE
    if _AWE_WORK_QUEUE is None:
        if self.debug:
            WQ.set_debug_flag(self.debug)
            if self.wq_logfile:
                awe.util.makedirs_parent(self.wq_logfile)
                WQ.cctools_debug_config_file(self.wq_logfile)
                WQ.cctools_debug_config_file_size(0)
        if self.name:
            # Advertising a project name requires the catalog server.
            self.catalog = True
        queue = WQ.WorkQueue(name=self.name,
                             port=self.port,
                             shutdown=self.shutdown,
                             catalog=self.catalog,
                             exclusive=self.exclusive)
        queue.specify_algorithm(self.schedule)
        if self.monitor:
            queue.enable_monitoring(self.summaryfile)
        if self.capacity:
            queue.estimate_capacity()
        awe.log('Running on port %d...' % queue.port)
        if queue.name:
            awe.log('Using project name %s' % queue.name)
        if self.debug and self.wq_logfile:
            awe.log('Logging WorkQueue to %s' % self.wq_logfile)
        if type(self.fastabort) in (float, int):
            queue.activate_fast_abort(self.fastabort)
        _AWE_WORK_QUEUE = queue
    else:
        awe.log('WARNING: using previously created WorkQueue instance')
    # The stats log is (re)attached on every call, including the reuse path.
    awe.util.makedirs_parent(self.wqstats_logfile)
    _AWE_WORK_QUEUE.specify_log(self.wqstats_logfile)
    return _AWE_WORK_QUEUE
def run_quantum():
    """Queue one Q-Chem ESP job per snapshot of M and wait for completion.

    Creates a 'calcs' directory with one subdirectory per snapshot, writes
    the Q-Chem input and the ESP grid (in Bohr) into each, submits the jobs
    to a Work Queue master, and blocks until all of them return.
    Relies on module globals: M, wq_port, formstr, bohr2ang, queue_up, wq_wait.
    """
    surfaces = create_esp_surfaces(M)
    work_queue.set_debug_flag('all')
    queue = work_queue.WorkQueue(wq_port, exclusive=False, shutdown=False)
    queue.specify_name('forcebalance')
    os.makedirs('calcs')
    os.chdir('calcs')
    for snap in range(M.ns):
        dnm = eval(formstr % snap)
        os.makedirs(dnm)
        os.chdir(dnm)
        # Tell Q-Chem how many ESP grid points to expect.
        M.edit_qcrems({'igdesp': len(surfaces[snap])})
        M.write("qchem.in", select=snap)
        # Grid coordinates converted from Angstrom to Bohr.
        grid_bohr = np.array(surfaces[snap]) / bohr2ang
        np.savetxt('ESPGrid', grid_bohr)
        print("Queueing up job", dnm)
        queue_up(queue,
                 command='qchem40 qchem.in qchem.out',
                 input_files=["qchem.in", "ESPGrid"],
                 output_files=["qchem.out", "plot.esp", "efield.dat"],
                 verbose=False)
        os.chdir('..')
    # One wait per submitted job.
    for _ in range(M.ns):
        wq_wait(queue)
    os.chdir('..')
""" Python-WorkQueue test """ from work_queue import Task, WorkQueue, set_debug_flag from work_queue import WORK_QUEUE_SCHEDULE_FCFS, WORK_QUEUE_SCHEDULE_FILES from work_queue import WORK_QUEUE_RANDOM_PORT from work_queue import WORK_QUEUE_OUTPUT # from workqueue import WORK_QUEUE_MASTER_MODE_STANDALONE, WORK_QUEUE_WORKER_MODE_SHARED from work_queue import WORK_QUEUE_TASK_ORDER_LIFO import os import sys import time set_debug_flag("debug") set_debug_flag("wq") wq = WorkQueue(WORK_QUEUE_RANDOM_PORT, name="workqueue_example", catalog=True, exclusive=False) os.environ["PATH"] = "../../../dttools/src:" + os.environ["PATH"] os.system("work_queue_worker -d all localhost %d &" % wq.port) print wq.name wq.specify_algorithm(WORK_QUEUE_SCHEDULE_FCFS) # wq.specify_name('workqueue_example') # wq.specify_master_mode(WORK_QUEUE_MASTER_MODE_STANDALONE) # wq.specify_worker_mode(WORK_QUEUE_WORKER_MODE_SHARED) wq.specify_task_order(WORK_QUEUE_TASK_ORDER_LIFO) if wq.empty():
def _mk_wq(self):
    """ Only one instance of WorkQueue should be run per process. This
    grants access to the WorkQueue singleton or else creates a new
    WorkQueue instance. This also ensures that the cctools WorkQueue
    object can handle more workers.

    Parameters:
        None

    Returns:
        The cctools WorkQueue singleton object
    """
    global _AWE_WORK_QUEUE
    if _AWE_WORK_QUEUE is not None:
        # Singleton already exists: warn and fall through to re-attach logging.
        awe.log('WARNING: using previously created WorkQueue instance')
    else:
        if self.debug:
            # Set up debugging parameters for the cctools WorkQueue object.
            # It has inbuilt debugging capabilities.
            WQ.set_debug_flag(self.debug)
            if self.wq_logfile:
                awe.util.makedirs_parent(self.wq_logfile)
                WQ.cctools_debug_config_file(self.wq_logfile)
                WQ.cctools_debug_config_file_size(0)
        if self.name:
            self.catalog = True
        # Create the cctools WorkQueue object
        wq = WQ.WorkQueue(name = self.name, port = self.port, shutdown = self.shutdown, catalog = self.catalog, exclusive = self.exclusive)
        # Specify the task scheduling algorithm
        wq.specify_algorithm(self.schedule)
        # Turn cctools WorkQueue object status monitoring on or off
        if self.monitor:
            wq.enable_monitoring(self.summaryfile)
        if self.capacity:
            # Determine the number of workers the WorkQueue object can handle
            wq.estimate_capacity()
        # Display information about this run of AWE-WQ
        awe.log('Running on port %d...' % wq.port)
        if wq.name:
            awe.log('Using project name %s' % wq.name)
        if self.debug and self.wq_logfile:
            awe.log('Logging WorkQueue to %s' % self.wq_logfile)
        # Set up fast abort procedures
        typ = type(self.fastabort)
        if typ is float or typ is int:
            wq.activate_fast_abort(self.fastabort)
        # Ensure that the singleton is set to the new instance
        _AWE_WORK_QUEUE = wq
    # Ensure that the singleton is logging to the correct files
    awe.util.makedirs_parent(self.wqstats_logfile)
    _AWE_WORK_QUEUE.specify_log(self.wqstats_logfile)
    # Return a reference to the singleton
    return _AWE_WORK_QUEUE
#!/usr/bin/env python import work_queue import os work_queue.set_debug_flag('all') wq = work_queue.WorkQueue(port=work_queue.WORK_QUEUE_RANDOM_PORT, exclusive=False, shutdown=True) wq.specify_name('test') for i in range(5): task = work_queue.Task('date') task.specify_algorithm(work_queue.WORK_QUEUE_SCHEDULE_FCFS) task.specify_tag('current date/time [%d]' % i) task.specify_input_file('/bin/date') print task.id print task.algorithm print task.command print task.tag wq.submit(task) os.system('work_queue_worker -d all -t 5 localhost %d &' % wq.port) while not wq.empty(): print '** wait for task' task = wq.wait(1) if task: print 'task' print 'algorithm', task.algorithm
#!/usr/bin/env python import work_queue import os work_queue.set_debug_flag('all') wq = work_queue.WorkQueue(port=work_queue.WORK_QUEUE_RANDOM_PORT, exclusive=False, shutdown=True) wq.specify_name('test') for i in range(5): task = work_queue.Task('date') task.specify_algorithm(work_queue.WORK_QUEUE_SCHEDULE_FCFS) task.specify_tag('current date/time [%d]' % i) task.specify_input_file('/bin/date') print task.id print task.algorithm print task.command print task.tag wq.submit(task) os.environ['PATH'] = '../../../dttools/src:' + os.environ['PATH'] os.system('work_queue_worker -d all -t 5 localhost %d &' % wq.port) while not wq.empty(): print '** wait for task' task = wq.wait(1) if task: print 'task'
# This software is distributed under the GNU General Public License. # See the file COPYING for details. """ Python-WorkQueue test """ from work_queue import Task, WorkQueue, set_debug_flag from work_queue import WORK_QUEUE_SCHEDULE_FCFS, WORK_QUEUE_SCHEDULE_FILES from work_queue import WORK_QUEUE_RANDOM_PORT from work_queue import WORK_QUEUE_OUTPUT #from workqueue import WORK_QUEUE_MASTER_MODE_STANDALONE, WORK_QUEUE_WORKER_MODE_SHARED from work_queue import WORK_QUEUE_TASK_ORDER_LIFO import os import sys import time set_debug_flag('debug') set_debug_flag('wq') wq = WorkQueue(WORK_QUEUE_RANDOM_PORT, name='workqueue_example', catalog=True, exclusive=False) os.environ['PATH'] = '../../../work_queue/src:' + os.environ['PATH'] os.system('work_queue_worker -d all localhost %d &' % wq.port) print wq.name wq.specify_algorithm(WORK_QUEUE_SCHEDULE_FCFS) #wq.specify_name('workqueue_example') #wq.specify_master_mode(WORK_QUEUE_MASTER_MODE_STANDALONE) #wq.specify_worker_mode(WORK_QUEUE_WORKER_MODE_SHARED)
# This software is distributed under the GNU General Public License. # See the file COPYING for details. """ Python-WorkQueue test """ from work_queue import Task, WorkQueue, set_debug_flag from work_queue import WORK_QUEUE_SCHEDULE_FCFS, WORK_QUEUE_SCHEDULE_FILES from work_queue import WORK_QUEUE_RANDOM_PORT from work_queue import WORK_QUEUE_OUTPUT #from workqueue import WORK_QUEUE_MASTER_MODE_STANDALONE, WORK_QUEUE_WORKER_MODE_SHARED import os import sys import time set_debug_flag('debug') set_debug_flag('wq') wq = WorkQueue(WORK_QUEUE_RANDOM_PORT, name='workqueue_example', catalog=False, exclusive=False) os.system('work_queue_worker -d all localhost %d &' % wq.port) print wq.name wq.specify_algorithm(WORK_QUEUE_SCHEDULE_FCFS) #wq.specify_name('workqueue_example') #wq.specify_master_mode(WORK_QUEUE_MASTER_MODE_STANDALONE) #wq.specify_worker_mode(WORK_QUEUE_WORKER_MODE_SHARED) if wq.empty(): print 'work queue is empty'