def spawn_worker(self, queues):
    """Fork a child worker consuming ``queues``; return the child's pid."""
    pid = os.fork()
    if pid:
        # Parent process: report the child's pid to the caller.
        return pid
    # Child process: run the worker loop, then hard-exit so we never
    # fall back into the parent's code path.
    Worker.run(queues, interval=1)
    os._exit(0)
def pyres_worker():
    """Worker CLI, lightly modified from pyres.

    Parses command-line options (redis host/port/password, interval,
    log level, logfile, pidfile, timeout), then starts a pyres Worker
    on the comma-separated queue list given as the single positional
    argument.
    """
    usage = 'usage: %prog [options] arg1'
    parser = OptionParser(usage=usage)
    parser.add_option('--host', dest='host', default=settings['host'])
    parser.add_option('--port', dest='port', type='int', default=settings['port'])
    parser.add_option('--password', dest='password', default=settings['password'])
    parser.add_option('-i', '--interval', dest='interval', default=None)
    parser.add_option('-l', '--log-level', dest='log_level', default='info')
    parser.add_option('-f', dest='logfile')
    parser.add_option('-p', dest='pidfile')
    parser.add_option('-t', '--timeout', dest='timeout')
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        # BUG FIX: error message previously misspelled "separated".
        parser.error("Argument must be a comma separated list of queues")
    # NOTE(review): an unrecognised log level falls back to the *string*
    # 'INFO', not logging.INFO -- setup_logging must accept both forms.
    log_level = getattr(logging, options.log_level.upper(), 'INFO')
    setup_logging(procname="pyres_worker", log_level=log_level,
                  filename=options.logfile)
    setup_pidfile(options.pidfile)
    interval = options.interval
    if interval is not None:
        interval = int(interval)
    # timeout stays None/empty when the option is absent; otherwise int.
    timeout = options.timeout and int(options.timeout)
    queues = args[0].split(',')
    server = '{0}:{1}'.format(options.host, options.port)
    Worker.run(queues, server, options.password, interval, timeout=timeout)
def pyres_worker():
    """CLI entry point: parse options and start a pyres Worker on the
    comma-separated queue list given as the single positional argument.

    Supports switching between blocking (BLPOP) and non-blocking (LPOP)
    queue pops via the -n flag.
    """
    usage = "usage: %prog [options] arg1"
    parser = OptionParser(usage=usage)
    parser.add_option("--host", dest="host", default="localhost")
    parser.add_option("--port", dest="port", type="int", default=6379)
    parser.add_option("-i", '--interval', dest='interval', default=None,
                      help='the default time interval to sleep between runs')
    parser.add_option('-l', '--log-level', dest='log_level', default='info',
                      help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.')
    parser.add_option('-f', dest='logfile',
                      help='If present, a logfile will be used.')
    parser.add_option("-n", "--nonblocking-pop", dest="blocking_pop",
                      action="store_false", default=True,
                      help="If absent, Pyres will use the Redis blocking pop (BLPOP) to obtain jobs from the queue(s). If present, Redis will use a non-blocking pop (LPOP) and will sleep for up to 8 seconds if no jobs are available.")
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        # BUG FIX: error message previously misspelled "separated".
        parser.error("Argument must be a comma separated list of queues")
    # NOTE(review): an unrecognised log level falls back to the *string*
    # 'INFO', not logging.INFO -- setup_logging must accept both forms.
    log_level = getattr(logging, options.log_level.upper(), 'INFO')
    setup_logging(log_level=log_level, filename=options.logfile)
    interval = options.interval
    if interval is not None:
        interval = int(interval)
    queues = args[0].split(',')
    server = '%s:%s' % (options.host, options.port)
    Worker.run(queues, server, options.blocking_pop, interval)
def test_working_on(self):
    """working_on() should record the in-progress job under the worker key."""
    worker_id = "%s:%s:%s" % (os.uname()[1], os.getpid(), 'basic')
    self.resq.enqueue(Basic, "test1")
    reserved = Job.reserve('basic', self.resq)
    worker = Worker(['basic'])
    worker.working_on(reserved)
    assert self.redis.exists("resque:worker:%s" % worker_id)
def pyres_worker():
    """CLI entry point: parse options, configure logging and a pidfile,
    then start a pyres Worker on the comma-separated queue list given as
    the single positional argument."""
    usage = "usage: %prog [options] arg1"
    parser = OptionParser(usage=usage)
    parser.add_option("--host", dest="host", default="localhost")
    parser.add_option("--port", dest="port", type="int", default=6379)
    parser.add_option("-i", '--interval', dest='interval', default=None,
                      help='the default time interval to sleep between runs')
    parser.add_option('-l', '--log-level', dest='log_level', default='info',
                      help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.')
    parser.add_option('-f', dest='logfile',
                      help='If present, a logfile will be used. "stderr", "stdout", and "syslog" are all special values.')
    parser.add_option('-p', dest='pidfile',
                      help='If present, a pidfile will be used.')
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        # BUG FIX: error message previously misspelled "separated".
        parser.error("Argument must be a comma separated list of queues")
    # NOTE(review): an unrecognised log level falls back to the *string*
    # 'INFO', not logging.INFO -- setup_logging must accept both forms.
    log_level = getattr(logging, options.log_level.upper(), 'INFO')
    setup_logging(procname="pyres_worker", log_level=log_level,
                  filename=options.logfile)
    setup_pidfile(options.pidfile)
    interval = options.interval
    if interval is not None:
        interval = int(interval)
    queues = args[0].split(',')
    server = '%s:%s' % (options.host, options.port)
    Worker.run(queues, server, interval)
def schedule_and_run_resque(): print 'scheduling' offset, upper_limit, payload = config['payload']['offset'], config['payload']['upper_limit'], config['payload']['payload'] while(offset < upper_limit): r.enqueue(FaceppAPIFetcher, offset, payload) offset += payload Worker.run([config['redis']['resque_queue_name']], server=config['redis']['redis_server'])
def handle_noargs(self, **options):
    """Django management command: start a pyres worker.

    Queue names come from the QUEUES/QUEUE environment variables or the
    PYRES_QUEUES setting; redis location from REDIS_HOST/REDIS_PORT.
    """
    queues = (environ.get('QUEUES')
              or environ.get('QUEUE')
              or getattr(settings, 'PYRES_QUEUES', None))
    if not queues:
        raise CommandError(
            'A list of queues should be specified for '
            'worker to run. Try set PYRES_QUEUES django settings '
            'variable or QUEUES environment variable, e.g.\n'
            '$ QUEUES=q1,q2 python2 management.py pyres_worker')
    if isinstance(queues, basestring):
        queues = queues.split(',')
    redis_host = getattr(settings, 'REDIS_HOST', 'localhost')
    redis_port = getattr(settings, 'REDIS_PORT', 6379)
    server = "%s:%d" % (redis_host, redis_port)
    try:
        interval = int(options.get('interval'))
    except ValueError:
        raise CommandError('Interval must be an integer')
    log_level = getattr(logging, options.get('log_level').upper(), 'INFO')
    setup_logging("pyres", log_level=log_level,
                  filename=options.get('log_file'))
    Worker.run(queues, server, interval)
def pyres_worker():
    """CLI entry point: parse options and start a pyres Worker on the
    comma-separated queue list given as the single positional argument.

    The sleep interval is parsed as a float here (sub-second polling).
    """
    usage = "usage: %prog [options] arg1"
    parser = OptionParser(usage=usage)
    parser.add_option("--host", dest="host", default="localhost")
    parser.add_option("--port", dest="port", type="int", default=6379)
    parser.add_option(
        "-i", "--interval", dest="interval", default=None,
        help="the default time interval to sleep between runs"
    )
    parser.add_option(
        "-l", "--log-level", dest="log_level", default="info",
        help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.',
    )
    parser.add_option("-f", dest="logfile", help="If present, a logfile will be used.")
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        # BUG FIX: error message previously misspelled "separated".
        parser.error("Argument must be a comma separated list of queues")
    # NOTE(review): an unrecognised log level falls back to the *string*
    # "INFO", not logging.INFO -- setup_logging must accept both forms.
    log_level = getattr(logging, options.log_level.upper(), "INFO")
    setup_logging(log_level=log_level, filename=options.logfile)
    interval = options.interval
    if interval is not None:
        interval = float(interval)
    queues = args[0].split(",")
    server = "%s:%s" % (options.host, options.port)
    Worker.run(queues, server, interval)
def test_unregister(self):
    """unregister_worker() removes the worker id from resque:workers."""
    worker = Worker(['basic'])
    worker.register_worker()
    worker_id = "%s:%s:%s" % (os.uname()[1], os.getpid(), 'basic')
    assert self.redis.sismember('resque:workers', worker_id)
    worker.unregister_worker()
    assert worker_id not in self.redis.smembers('resque:workers')
def handle_noargs(self, **options):
    """Run a pyres worker from a Django management command.

    Queues are resolved from the environment (QUEUES/QUEUE) or the
    PYRES_QUEUES setting; missing queues abort with CommandError.
    """
    queues = environ.get('QUEUES') or environ.get('QUEUE') or \
        getattr(settings, 'PYRES_QUEUES', None)
    if not queues:
        raise CommandError('A list of queues should be specified for '
                           'worker to run. Try set PYRES_QUEUES django settings '
                           'variable or QUEUES environment variable, e.g.\n'
                           '$ QUEUES=q1,q2 python2 management.py pyres_worker')
    if isinstance(queues, basestring):
        queues = queues.split(',')
    server = "%s:%d" % (getattr(settings, 'REDIS_HOST', 'localhost'),
                        getattr(settings, 'REDIS_PORT', 6379))
    try:
        interval = int(options.get('interval'))
    except ValueError:
        raise CommandError('Interval must be an integer')
    log_level = getattr(logging, options.get('log_level').upper(), 'INFO')
    setup_logging("pyres", log_level=log_level,
                  filename=options.get('log_file'))
    Worker.run(queues, server, interval)
def start_worker(queues, all, interval): """ Start a worker process to consume the queues in :queues. :interval is the number of seconds before tasks. Minimum of 1 sadly. """ selected_queues = map(str, queues) known_queues = resq.get_queue_names() unknown_queues = map(str, set(queues).difference(set(known_queues))) # gotta have at least one queue if not selected_queues: if all: selected_queues = known_queues else: message = "option --queues must be one or more of: %s. exiting." % known_queues logging.error(message) return # stop if one of the queues specified is not in the list of possible queues if unknown_queues: message = "received one or more unknown queue names: %s. exiting." % list(unknown_queues) logging.error(message) return logging.info("Starting worker for %s with interval of %s second(s), for %s" % (selected_queues, interval, resq._REDIS)) try: Worker.run(selected_queues, resq._REDIS, interval) logging.info("started worker...") except Exception, e: logging.error("error starting worker: %s. exiting." % e)
def test_job_failure(self):
    """A job that raises should bump the global and per-worker failed stats."""
    self.resq.enqueue(ErrorObject)
    worker = Worker(['basic'])
    worker.process()
    worker_id = "%s:%s:%s" % (os.uname()[1], os.getpid(), 'basic')
    assert not self.redis.get('resque:worker:%s' % worker)
    assert self.redis.get("resque:stat:failed") == str(1)
    assert self.redis.get("resque:stat:failed:%s" % worker_id) == str(1)
def test_started(self):
    """The started property round-trips through redis as a UTC epoch string
    and is cleared when set to None."""
    worker = Worker(['basic'])
    started_at = datetime.datetime.utcnow()
    worker.started = started_at
    worker_id = "%s:%s:%s" % (os.uname()[1], os.getpid(), 'basic')
    epoch = str(int(calendar.timegm(started_at.utctimetuple())))
    assert self.redis.get('resque:worker:%s:started' % worker_id) == epoch
    assert worker.started == epoch
    worker.started = None
    assert not self.redis.exists('resque:worker:%s:started' % worker_id)
def working(request):
    """Render the 'working' page listing all workers and the busy ones."""
    template = env.get_template('working.html')
    context = {
        'all_workers': Worker.all(HOST),
        'workers': Worker.working(resq._server),
        'resq': resq,
    }
    return str(template.render(context))
def pyres_worker():
    """CLI entry point: parse options (host/port/password, interval,
    log level, logfile, pidfile, timeout), then start a pyres Worker on
    the comma-separated queue list given as the single positional
    argument."""
    usage = "usage: %prog [options] arg1"
    parser = OptionParser(usage=usage)
    parser.add_option("--host", dest="host", default="localhost")
    parser.add_option("--port", dest="port", type="int", default=6379)
    parser.add_option("--password", dest="password", default=None)
    parser.add_option("-i", '--interval', dest='interval', default=None,
                      help='the default time interval to sleep between runs')
    parser.add_option(
        '-l', '--log-level', dest='log_level', default='info',
        help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.')
    parser.add_option(
        '-f', dest='logfile',
        help='If present, a logfile will be used. "stderr", "stdout", and "syslog" are all special values.')
    parser.add_option('-p', dest='pidfile',
                      help='If present, a pidfile will be used.')
    parser.add_option("-t", '--timeout', dest='timeout', default=None,
                      help='the timeout in seconds for this worker')
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        # BUG FIX: error message previously misspelled "separated".
        parser.error("Argument must be a comma separated list of queues")
    # NOTE(review): an unrecognised log level falls back to the *string*
    # 'INFO', not logging.INFO -- setup_logging must accept both forms.
    log_level = getattr(logging, options.log_level.upper(), 'INFO')
    setup_logging(procname="pyres_worker", log_level=log_level,
                  filename=options.logfile)
    setup_pidfile(options.pidfile)
    interval = options.interval
    if interval is not None:
        interval = int(interval)
    # timeout stays None/empty when the option is absent; otherwise int.
    timeout = options.timeout and int(options.timeout)
    queues = args[0].split(',')
    server = '%s:%s' % (options.host, options.port)
    password = options.password
    Worker.run(queues, server, password, interval, timeout=timeout)
def test_started(self):
    """started round-trips as a local-epoch string (bytes in redis, hence
    the .decode() calls) and is cleared when set to None."""
    import datetime
    worker = Worker(['basic'])
    started_at = datetime.datetime.now()
    worker.started = started_at
    worker_id = "%s:%s:%s" % (os.uname()[1], os.getpid(), 'basic')
    expected = str(int(time.mktime(started_at.timetuple())))
    assert self.redis.get('resque:worker:%s:started' % worker_id).decode() == expected
    assert worker.started.decode() == expected
    worker.started = None
    assert not self.redis.exists('resque:worker:%s:started' % worker_id)
def test_startup(self):
    """startup() registers the worker and installs the signal handlers."""
    import signal
    worker = Worker(['basic'])
    worker.startup()
    worker_id = "%s:%s:%s" % (os.uname()[1], os.getpid(), 'basic')
    assert self.redis.sismember('resque:workers', worker_id)
    assert signal.getsignal(signal.SIGTERM) == worker.shutdown_all
    assert signal.getsignal(signal.SIGINT) == worker.shutdown_all
    assert signal.getsignal(signal.SIGQUIT) == worker.schedule_shutdown
    assert signal.getsignal(signal.SIGUSR1) == worker.kill_child
def test_started(self):
    """started round-trips through redis as a local-epoch string and is
    cleared when set to None."""
    import datetime
    worker = Worker(['basic'])
    started_at = datetime.datetime.now()
    worker.started = started_at
    worker_id = "%s:%s:%s" % (os.uname()[1], os.getpid(), 'basic')
    expected = str(int(time.mktime(started_at.timetuple())))
    assert self.redis.get('resque:worker:%s:started' % worker_id) == expected
    assert worker.started == expected
    worker.started = None
    assert not self.redis.exists('resque:worker:%s:started' % worker_id)
def test_state(self):
    """state() flips from 'idle' to 'working' around a job and back."""
    worker = Worker(['basic'])
    assert worker.state() == 'idle'
    self.resq.enqueue_from_string('tests.Basic', 'basic', 'test1')
    worker.register_worker()
    reserved = Job.reserve('basic', self.resq)
    worker.working_on(reserved)
    assert worker.state() == 'working'
    worker.done_working()
    assert worker.state() == 'idle'
def test_enqueue_from_string(self):
    """Enqueueing by dotted class name pushes one job that processes cleanly."""
    self.resq.enqueue_from_string('tests.Basic', 'basic', 'test1')
    worker_id = "%s:%s:%s" % (os.uname()[1], os.getpid(), 'basic')
    assert self.redis.llen("resque:queue:basic") == 1
    reserved = Job.reserve('basic', self.resq)
    worker = Worker(['basic'])
    worker.process(reserved)
    # Nothing in-flight and no failures recorded.
    assert not self.redis.get('resque:worker:%s' % worker)
    assert not self.redis.get("resque:stat:failed")
    assert not self.redis.get("resque:stat:failed:%s" % worker_id)
def test_enqueue_from_string(self):
    """A job enqueued by dotted class name is reserved and processed
    without recording any failure."""
    self.resq.enqueue_from_string("tests.Basic", "basic", "test1")
    worker_id = "%s:%s:%s" % (os.uname()[1], os.getpid(), "basic")
    assert self.redis.llen("resque:queue:basic") == 1
    reserved = Job.reserve("basic", self.resq)
    worker = Worker(["basic"])
    worker.process(reserved)
    for key in ("resque:worker:%s" % worker,
                "resque:stat:failed",
                "resque:stat:failed:%s" % worker_id):
        assert not self.redis.get(key)
def test_enqueue_from_string(self):
    """enqueue_from_string should behave like a normal enqueue: one queued
    job, processed with no failure stats."""
    self.resq.enqueue_from_string('tests.Basic', 'basic', 'test1')
    worker_id = "%s:%s:%s" % (os.uname()[1], os.getpid(), 'basic')
    assert self.redis.llen("resque:queue:basic") == 1
    reserved = Job.reserve('basic', self.resq)
    processing_worker = Worker(['basic'])
    processing_worker.process(reserved)
    assert not self.redis.get('resque:worker:%s' % processing_worker)
    assert not self.redis.get("resque:stat:failed")
    assert not self.redis.get("resque:stat:failed:%s" % worker_id)
def test_started(self):
    """started is stored as a '%Y-%m-%d %H:%M:%S' string and read back as a
    datetime truncated to whole seconds; None clears the key."""
    import datetime
    worker = Worker(['basic'])
    started_at = datetime.datetime.now()
    worker.started = started_at
    worker_id = "%s:%s:%s" % (os.uname()[1], os.getpid(), 'basic')
    stamp = started_at.strftime('%Y-%m-%d %H:%M:%S')
    assert self.redis.get('resque:worker:%s:started' % worker_id) == stamp
    assert worker.started == datetime.datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S')
    worker.started = None
    assert not self.redis.exists('resque:worker:%s:started' % worker_id)
def test_info(self):
    """info() reports queue, server and worker counts, and sees a newly
    registered worker."""
    self.resq.enqueue(Basic, "test1")
    self.resq.enqueue(TestProcess)
    info = self.resq.info()
    assert info['queues'] == 2
    assert info['servers'] == ['localhost:6379']
    assert info['workers'] == 0
    worker = Worker(['basic'])
    worker.register_worker()
    refreshed = self.resq.info()
    assert refreshed['workers'] == 1
def main():
    """CLI entry point: start a pyres worker inside the Flask app context."""
    args = parse_arguments()
    app = create_app(args.conf, debug=args.debug)
    if args.queue:
        queue = args.queue.split(',')
    else:
        queue = app.config['DEFAULT_QUEUES']
    redis_server = "%s:%s" % (app.config['REDIS_HOST'], app.config['REDIS_PORT'])
    with app.app_context():
        Worker.run(queue, server=redis_server,
                   password=app.config['REDIS_PASS'])
def test_info(self):
    """info() counts queues/servers/workers and picks up registrations."""
    self.resq.enqueue(Basic, "test1")
    self.resq.enqueue(TestProcess)
    before = self.resq.info()
    assert before["queues"] == 2
    assert before["servers"] == ["localhost:6379"]
    assert before["workers"] == 0
    worker = Worker(["basic"])
    worker.register_worker()
    after = self.resq.info()
    assert after["workers"] == 1
def test_info(self):
    """Registering a worker should be reflected in a fresh info() snapshot."""
    self.resq.enqueue(Basic, "test1")
    self.resq.enqueue(TestProcess)
    snapshot = self.resq.info()
    assert snapshot['queues'] == 2
    assert snapshot['servers'] == ['localhost:6379']
    assert snapshot['workers'] == 0
    worker = Worker(['basic'])
    worker.register_worker()
    snapshot = self.resq.info()
    assert snapshot['workers'] == 1
def test_processed(self):
    """processed() increments the global and per-worker processed counters."""
    # Worker id format is hostname:pid:queues.
    name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic')
    worker = Worker(['basic'])
    worker.processed()
    # NOTE(review): these keys lack the usual "resque:" prefix used by the
    # other tests in this file, and the asserts compare redis GET results
    # to ints rather than strings -- confirm the redis client here really
    # returns ints (other variants of this test compare against str(1)).
    assert self.redis.exists("stat:processed")
    assert self.redis.exists("stat:processed:%s" % name)
    assert self.redis.get("stat:processed") == 1
    assert self.redis.get("stat:processed:%s" % name) == 1
    worker.processed()
    assert self.redis.get("stat:processed") == 2
    assert self.redis.get("stat:processed:%s" % name) == 2
def test_failed(self):
    """failed() increments the global and per-worker failure counters."""
    # Worker id format is hostname:pid:queues.
    name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic')
    worker = Worker(['basic'])
    worker.failed()
    assert self.redis.exists("resque:stat:failed")
    assert self.redis.exists("resque:stat:failed:%s" % name)
    # NOTE(review): these asserts compare redis GET results to ints rather
    # than strings -- confirm the redis client here really returns ints
    # (another variant of this test compares against str(1)/str(2)).
    assert self.redis.get("resque:stat:failed") == 1
    assert self.redis.get("resque:stat:failed:%s" % name) == 1
    worker.failed()
    assert self.redis.get("resque:stat:failed") == 2
    assert self.redis.get("resque:stat:failed:%s" % name) == 2
def test_get_job(self): worker = Worker(['basic']) self.resq.enqueue(Basic,"test1") job = Job.reserve('basic', self.resq) worker.working_on(job) name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') assert worker.job() == ResQ.decode(self.redis.get('resque:worker:%s' % name)) worker.done_working() w2 = Worker(['basic']) print w2.job() assert w2.job() == {}
def test_signals(self):
    """schedule_shutdown() sets the shutdown flag; kill_child() is a no-op
    (falsy) when there is no child process."""
    import inspect, signal
    worker = self.worker
    worker.startup()
    frame = inspect.currentframe()
    worker.schedule_shutdown(frame, signal.SIGQUIT)
    assert worker._shutdown
    del worker
    worker = Worker(['high'])
    assert not worker.kill_child(frame, signal.SIGUSR1)
def test_process(self):
    """process() handles both an explicitly passed job and a popped one,
    recording no failures for a well-behaved job."""
    worker_id = "%s:%s:%s" % (os.uname()[1], os.getpid(), 'basic')
    self.resq.enqueue(Basic, "test1")
    reserved = Job.reserve('basic', self.resq)
    worker = Worker(['basic'])
    worker.process(reserved)
    for key in ('resque:worker:%s' % worker,
                "resque:stat:failed",
                "resque:stat:failed:%s" % worker_id):
        assert not self.redis.get(key)
    self.resq.enqueue(Basic, "test1")
    worker.process()
    for key in ('resque:worker:%s' % worker,
                "resque:stat:failed",
                "resque:stat:failed:%s" % worker_id):
        assert not self.redis.get(key)
def test_detect_code_0_os_exit_as_success(self):
    """A forked child that hard-exits with code 0 must not count as failed."""
    worker = Worker(['basic'])
    self.resq.enqueue(PrematureHardExitJob, 0)
    assert worker.job() == {}
    assert worker.get_failed() == 0
    worker.fork_worker(worker.reserve())
    # Still idle, still no failures after the child exited cleanly.
    assert worker.job() == {}
    assert worker.get_failed() == 0
def test_detect_non_0_sys_exit_as_failure(self):
    """A child exiting with a non-zero status must be recorded as a failure."""
    worker = Worker(['basic'])
    self.resq.enqueue(PrematureExitJob, 9)
    assert worker.job() == {}
    assert worker.get_failed() == 0
    worker.fork_worker(worker.reserve())
    # Idle again, but the non-zero exit bumped the failure counter.
    assert worker.job() == {}
    assert worker.get_failed() == 1
def test_detect_crashed_workers_as_failures(self):
    """A child that crashes outright must be recorded as a failure."""
    worker = Worker(['basic'])
    self.resq.enqueue(CrashJob)
    assert worker.job() == {}
    assert worker.get_failed() == 0
    worker.fork_worker(worker.reserve())
    # Idle again, but the crash bumped the failure counter.
    assert worker.job() == {}
    assert worker.get_failed() == 1
def run(self):
    """Boot Django, configure logging, and consume the python queues
    against the configured redis server."""
    import ggtracker
    from ggtracker.utils import django_setup
    from django.conf import settings
    django_setup()
    # NOTE(review): `options` is not defined in this method -- presumably a
    # module-level optparse/argparse result; confirm before relying on it.
    if options.logfile != '':
        setup_logging(procname="pyres_worker", log_level='INFO',
                      filename=options.logfile)
    else:
        setup_logging(procname="pyres_worker", log_level='INFO')
    Worker.run(['python', 'python-low', 'python-bg'], settings.REDIS_SERVER)
def main(argv=sys.argv):
    """
    Launches a pyres worker using the host and queues provided by the
    config keys `pyres.host` and `pyres.queues`
    """
    if len(argv) != 2:
        usage(argv)
    settings = get_appsettings(argv[1])
    queue_names = settings['pyres.queues'].strip().split(',')
    setup_logging(procname="notaliens.tasks.worker", log_level="INFO")
    Worker.run(queue_names, server=settings['pyres.host'])
def handle(self, queue_list, **options):
    """Django command: run a pyres worker over the comma-separated
    queue_list, using connection details from Django settings."""
    log_level = getattr(logging, options['log_level'].upper(), 'INFO')
    setup_logging(procname="pyres_worker", log_level=log_level, filename=None)
    setup_pidfile(settings.PYRES_WORKER_PIDFILE)
    worker = Worker(
        queues=queue_list.split(','),
        server=settings.PYRES_HOST,
        password=settings.PYRES_PASSWORD,
        timeout=settings.PYRES_WORKER_TIMEOUT,
    )
    interval = settings.PYRES_WORKER_INTERVAL
    if interval is None:
        worker.work()
    else:
        worker.work(interval)
def test_kills_stale_workers_after_timeout(self):
    """A worker with a timeout marks the job as failed once the child
    processing it outlives that timeout."""
    import signal
    timeout = 1
    worker = Worker(['basic'], timeout=timeout)
    # The job sleeps longer than the worker's timeout allows.
    self.resq.enqueue(TimeoutJob, timeout + 1)
    child = os.fork()
    if child:
        # Parent: initially no failures; wait long enough for the child
        # worker to pick up the job and exceed its timeout, then reap it.
        assert worker.get_failed() == 0
        time.sleep(timeout + 2)
        os.kill(child, signal.SIGKILL)
        os.waitpid(-1, os.WNOHANG)
        assert worker.get_failed() == 1
    else:
        # Child: run the worker loop that processes the slow job.
        worker.work()
def test_prune_dead_workers(self):
    """prune_dead_workers() removes local workers with dead pids but
    leaves workers registered on other hosts alone."""
    worker = Worker(['basic'])
    # This worker was never registered, so the set starts empty.
    assert self.redis.scard('resque:workers') == 0
    local_host = os.uname()[1]
    for fake_pid in ('1', '2', '3'):
        self.redis.sadd('resque:workers',
                        "%s:%s:%s" % (local_host, fake_pid, 'basic'))
    assert self.redis.scard('resque:workers') == 3
    worker.prune_dead_workers()
    assert self.redis.scard('resque:workers') == 0
    for fake_pid in ('1', '2', '3'):
        self.redis.sadd('resque:workers',
                        "%s:%s:%s" % ('host-that-does-not-exist', fake_pid, 'basic'))
    worker.prune_dead_workers()
    # Workers on a different host must not be pruned by this process.
    assert self.redis.scard('resque:workers') == 3
def test_signals(self):
    """schedule_shutdown() flips the shutdown flag; kill_child() returns a
    falsy value when there is no child to kill."""
    import inspect, signal
    worker = Worker(['basic'])
    worker.startup()
    frame = inspect.currentframe()
    worker.schedule_shutdown(frame, signal.SIGQUIT)
    assert worker._shutdown
    del worker
    worker = Worker(['high'])
    assert not worker.kill_child(frame, signal.SIGUSR1)
def worker(self):
    """Look up the worker named in the URL kwargs and decorate it with
    host/pid/queues parsed from its string id; None when not found."""
    found = Wrkr.find(self.kwargs['worker_id'], self.resq)
    if not found:
        return None
    # Worker string ids look like "host:pid:queue1,queue2".
    host, pid, queue_str = str(found).split(':')
    found.host = host
    found.pid = pid
    found.queues = queue_str.split(',')
    return found
def test_kills_stale_workers_after_timeout(self):
    """fork_worker() on a job that outlives the timeout records a failure."""
    stale_timeout = 1
    worker = Worker(['basic'], timeout=stale_timeout)
    # The job sleeps one second longer than the worker tolerates.
    self.resq.enqueue(TimeoutJob, stale_timeout + 1)
    assert worker.get_failed() == 0
    worker.fork_worker(worker.reserve())
    assert worker.get_failed() == 1
def test_failed(self):
    """failed() bumps both the global and per-worker failure counters."""
    worker_id = "%s:%s:%s" % (os.uname()[1], os.getpid(), 'basic')
    worker = Worker(['basic'])
    worker.failed()
    assert self.redis.exists("resque:stat:failed")
    assert self.redis.exists("resque:stat:failed:%s" % worker_id)
    assert self.redis.get("resque:stat:failed") == '1'
    assert self.redis.get("resque:stat:failed:%s" % worker_id) == '1'
    assert worker.get_failed() == 1
    worker.failed()
    assert self.redis.get("resque:stat:failed") == '2'
    assert self.redis.get("resque:stat:failed:%s" % worker_id) == '2'
    assert worker.get_failed() == 2
def test_prune_dead_workers(self):
    """Dead local workers are pruned; workers on other hosts are kept."""
    worker = Worker(['basic'])
    # This worker was never registered, so the set starts empty.
    assert self.redis.scard('resque:workers') == 0

    def register(host, pid):
        # Register a fake worker entry directly in redis.
        self.redis.sadd('resque:workers', "%s:%s:%s" % (host, pid, 'basic'))

    register(os.uname()[1], '1')
    register(os.uname()[1], '2')
    register(os.uname()[1], '3')
    assert self.redis.scard('resque:workers') == 3
    worker.prune_dead_workers()
    assert self.redis.scard('resque:workers') == 0
    register('host-that-does-not-exist', '1')
    register('host-that-does-not-exist', '2')
    register('host-that-does-not-exist', '3')
    worker.prune_dead_workers()
    # Workers registered on a different host must not be pruned here.
    assert self.redis.scard('resque:workers') == 3
def test_worker_pids(self):
    """worker_pids() lists live worker pids and drops exited ones."""
    # spawn worker processes and get pids
    pids = []
    pids.append(self.spawn_worker(['basic']))
    pids.append(self.spawn_worker(['basic']))
    # Give the children a moment to register themselves.
    time.sleep(1)
    worker_pids = Worker.worker_pids()
    # send kill signal to workers and wait for them to exit
    import signal
    for pid in pids:
        os.kill(pid, signal.SIGQUIT)
        os.waitpid(pid, 0)
    # ensure worker_pids() returned the correct pids
    for pid in pids:
        assert str(pid) in worker_pids
    # ensure the workers are no longer returned by worker_pids()
    worker_pids = Worker.worker_pids()
    for pid in pids:
        assert str(pid) not in worker_pids
def test_working(self):
    """Worker.working() lists workers that are mid-job; equality is by
    identity string, not object identity."""
    worker = Worker(['basic'])
    self.resq.enqueue_from_string('tests.Basic', 'basic', 'test1')
    worker.register_worker()
    reserved = Job.reserve('basic', self.resq)
    worker.working_on(reserved)
    busy = Worker.working(self.resq)
    assert len(busy) == 1
    assert str(worker) == str(busy[0])
    # Same identity string, but distinct objects.
    assert worker != busy[0]
def pyres_worker():
    """CLI entry point: parse options and start a pyres Worker on the
    comma-separated queue list given as the single positional argument.

    The sleep interval is parsed as a float (sub-second polling).
    """
    usage = "usage: %prog [options] arg1"
    parser = OptionParser(usage=usage)
    parser.add_option("--host", dest="host", default="localhost")
    parser.add_option("--port", dest="port", type="int", default=6379)
    parser.add_option("-i", '--interval', dest='interval', default=None,
                      help='the default time interval to sleep between runs')
    parser.add_option(
        '-l', '--log-level', dest='log_level', default='info',
        help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.')
    parser.add_option('-f', dest='logfile',
                      help='If present, a logfile will be used.')
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        # BUG FIX: error message previously misspelled "separated".
        parser.error("Argument must be a comma separated list of queues")
    # NOTE(review): an unrecognised log level falls back to the *string*
    # 'INFO', not logging.INFO -- setup_logging must accept both forms.
    log_level = getattr(logging, options.log_level.upper(), 'INFO')
    setup_logging(log_level=log_level, filename=options.logfile)
    interval = options.interval
    if interval is not None:
        interval = float(interval)
    queues = args[0].split(',')
    server = '%s:%s' % (options.host, options.port)
    Worker.run(queues, server, interval)
def main(args=None):
    """CLI entry point for the Wight worker: parse args, configure logging,
    connect to redis (pyres) and mongo, then run the worker loop."""
    if args is None:
        args = sys.argv[1:]
    parser = argparse.ArgumentParser()
    parser.add_argument('--conf', '-c', help="Path to configuration file.")
    parser.add_argument('--verbose', '-v', action='count', default=0,
                        help='Log level: v=warning, vv=info, vvv=debug.')
    options = parser.parse_args(args)
    # Map the -v count to a level name via the LOGS table.
    log_level = LOGS[options.verbose].upper()
    logging.basicConfig(
        level=getattr(logging, log_level),
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    if options.conf:
        cfg = Config.load(abspath(expanduser(options.conf)))
    else:
        cfg = Config()
    conn = ResQ(server="%s:%s" % (cfg.REDIS_HOST, cfg.REDIS_PORT),
                password=cfg.REDIS_PASSWORD)
    conn.config = cfg
    connect(
        cfg.MONGO_DB,
        host=cfg.MONGO_HOST,
        port=cfg.MONGO_PORT,
        username=cfg.MONGO_USER,
        password=cfg.MONGO_PASS
    )
    # NOTE: bare `print` statements below mean this module is Python 2.
    print
    print("--- Wight worker started ---")
    print
    Worker.run([WorkerJob.queue], conn)
    print
    print "--- Wight worker killed ---"
    print
def workers(self):
    """Build display containers for every registered worker, including
    details of the job currently being processed, if any."""
    containers = []
    for wrkr in Wrkr.all(self.resq):
        data = wrkr.processing()
        # Worker string ids look like "host:pid:queue1,queue2".
        host, pid, queue_str = str(wrkr).split(':')
        item = {
            'state': wrkr.state(),
            'host': host,
            'pid': pid,
            'w': wrkr,
            'queues': queue_str.split(','),
            'queue': wrkr.job().get('queue'),
        }
        if 'queue' in data:
            # Worker is mid-job: expose the job class and start time.
            item['data'] = True
            item['code'] = data['payload']['class']
            item['runat'] = datetime.datetime.fromtimestamp(float(data['run_at']))
        containers.append(WebContainer(**item))
    return containers
def test_retry_on_exception(self):
    """A job that raises until a deadline is retried via the scheduler and
    eventually succeeds without recording a failure."""
    start = datetime.datetime.now()
    self.set_current_time(start)
    worker = Worker(['basic'])
    scheduler = Scheduler()
    # queue up a job that will fail for 30 seconds
    self.resq.enqueue(RetryOnExceptionJob,
                      start + datetime.timedelta(seconds=30))
    worker.process()
    assert worker.get_failed() == 0
    # check it retries the first time
    self.set_current_time(start + datetime.timedelta(seconds=5))
    scheduler.handle_delayed_items()
    assert worker.process() == None
    assert worker.get_failed() == 0
    # check it runs fine when it's stopped crashing
    self.set_current_time(start + datetime.timedelta(seconds=60))
    scheduler.handle_delayed_items()
    assert worker.process() == True
    assert worker.get_failed() == 0
def test_retries_give_up_eventually(self):
    """Retries stop and the job is recorded as failed once the retry
    window has been exceeded."""
    start = datetime.datetime.now()
    self.set_current_time(start)
    worker = Worker(['basic'])
    scheduler = Scheduler()
    # queue up a job that will fail for 60 seconds
    self.resq.enqueue(RetryOnExceptionJob,
                      start + datetime.timedelta(seconds=60))
    worker.process()
    assert worker.get_failed() == 0
    # check it retries the first time
    self.set_current_time(start + datetime.timedelta(seconds=5))
    scheduler.handle_delayed_items()
    assert worker.process() == None
    assert worker.get_failed() == 0
    # check it fails when we've been trying too long
    self.set_current_time(start + datetime.timedelta(seconds=20))
    scheduler.handle_delayed_items()
    assert worker.process() == None
    assert worker.get_failed() == 1