def __init__(self, queue_name, sleep_time):
    """Bind the target queue and initialize throughput counters.

    :param queue_name: name of the queue jobs are enqueued into.
    :param sleep_time: delay between consecutive enqueue attempts.
    """
    self.stopped = False
    self.sleep_time = sleep_time
    self.queue = Queue(name=queue_name)
    # Stats bookkeeping for periodic throughput reporting.
    self.enqueued = 0
    self.last_processed = 0
    self.last_time = time()
def removeWorker(self, name):
    """Deregister a dead worker and drop its private queue.

    :param name: worker name (also the name of its own queue).
    """
    self.log.info("Removing worker/queue %s from system", name)
    # Drop the worker's registration key from the dead-workers set.
    dead_worker = Worker(queues=[], name=name)
    yield dead_worker.remove(dead_worker.key)
    # Flush the queue that belonged exclusively to this worker.
    own_queue = Queue(name)
    yield own_queue.empty()
def enqueue(self):
    """Enqueue one `example_func` job carrying the running counter, then reschedule.

    Reschedules itself almost immediately (1 ms) unless the service was stopped;
    rescheduling happens in `finally` so the loop survives a single failure's log,
    though a raised exception still propagates to the caller.
    """
    try:
        target = Queue()
        yield target.enqueue_call(func=example_func, args=(self.processed,))
        self.processed += 1
    except Exception as e:
        log.msg('Exception enqueing:', e)
        raise
    finally:
        if not self.stopped:
            reactor.callLater(0.001, self.enqueue)
def relogin(self, xmppuser, pending_jobs):
    """Re-attach an XMPP user to this machine and replay the jobs queued for it.

    Logs the user back in (tolerating "already logged"), then re-enqueues two
    batches of work onto this machine's queue (``conf.MACHINEID``): first the
    jobs that were pending *before* migration started, then any jobs that
    accumulated in the user's personal migration queue *while* migration ran.
    Finally (always, even on error) marks the user as owned by this machine
    and clears the migration flag.

    :param xmppuser: user object being relogged; may be swapped for the
        already-running client's user if login reports a duplicate session.
    :param pending_jobs: list of job ids pending before migration was launched.

    NOTE(review): this is a Twisted inlineCallbacks-style generator (uses
    ``yield`` on deferreds) — presumably decorated at the call site; confirm.
    """
    len_pending_jobs = len(pending_jobs)
    self.log.info('RELOGIN %s. Pending_Jobs: %s. Data: %s', xmppuser.jid, len_pending_jobs, xmppuser)
    try:
        yield self._shared_login(xmppuser)
        running_client = KatooApp().getService(xmppuser.userid)
        self.log.info('RELOGIN %s. Running xmppclient: %s', xmppuser.jid, running_client)
    except XMPPUserAlreadyLogged:
        # If user is already logged, the running client's data is the valid
        # copy, so adopt it as xmppuser and continue the relogin with it.
        running_client = KatooApp().getService(xmppuser.userid)
        self.log.warning('RELOGIN %s. User Already logged taking user to perform RELOGIN process. Running xmppclient: %s', xmppuser.jid, running_client)
        xmppuser = running_client.user
        # Temporarily claim the user under its own id while jobs are replayed.
        xmppuser.worker = xmppuser.userid
    try:
        # Stamp migration start time and persist before touching any queues.
        xmppuser.onMigrationTime = Timer().utcnow()
        res = yield xmppuser.save()
        self.log.info('perform relogin %s. Enqueuing pending jobs %s before migration was launched. Data %s. Save result: %s', xmppuser.jid, len_pending_jobs, running_client, res)
        queue = Queue(conf.MACHINEID)
        # Enqueue pending jobs before migration was launched; jobs that
        # vanished from Redis in the meantime are silently skipped.
        for job_id in pending_jobs:
            try:
                job = yield Job.fetch(job_id, queue.connection)
                yield queue.enqueue_job(job)
            except NoSuchJobError:
                pass
        self.log.info('perform relogin %s. Finished enqueuing pending jobs before migration was launched.', xmppuser.jid)
        self.log.info('perform relogin %s. Enqueing pending jobs after migration was launched.', xmppuser.jid)
        # Enqueue pending jobs after migration was launched: drain the user's
        # personal migration queue into this machine's queue.
        migration_queue = Queue(xmppuser.userid)
        migration_job_ids = yield migration_queue.job_ids
        yield migration_queue.empty()
        while migration_job_ids:
            job_id = migration_job_ids.pop(0)
            try:
                job = yield Job.fetch(job_id, migration_queue.connection)
                # Enqueue job in current worker queue
                yield queue.enqueue_job(job)
            except NoSuchJobError:
                pass
            if not migration_job_ids:
                # Batch drained: publish ownership, then re-check the
                # migration queue in case more jobs arrived while replaying.
                xmppuser.worker = conf.MACHINEID
                xmppuser.onMigrationTime = ''
                yield xmppuser.save()
                migration_job_ids = yield migration_queue.job_ids
                yield migration_queue.empty()
    finally:
        # Always leave the user owned by this machine with migration cleared,
        # even if job replay failed part-way through.
        xmppuser.worker = conf.MACHINEID
        xmppuser.onMigrationTime = ''
        res = yield xmppuser.save()
        self.log.info('RELOGIN %s. Finished. Data %s. Save result: %s', xmppuser.jid, xmppuser, res)
class TOfflineDequeingService(service.Service, RedisMixin):
    """Twisted service that pulls jobs from a queue and performs each one in a
    thread, chaining the next dequeue from the previous job's completion.

    :param queue_name: name of the queue to drain.
    :param timeout: blocking timeout passed to each dequeue call.
    """

    def __init__(self, queue_name, timeout):
        self.queue = Queue(name=queue_name)
        self.timeout = timeout
        self.stopped = False

    def callback_perform(self, result):
        # A job finished: schedule the next dequeue unless we are shutting down.
        if not self.stopped:
            reactor.callWhenRunning(self.dequeue)

    def callback_dequeue(self, job):
        """Perform a fetched job in a worker thread; on a miss, loop again.

        Returns a Deferred firing when the job (if any) has been performed.
        """
        if job is not None:
            d = threads.deferToThread(job.perform)
            d.addCallback(self.callback_perform)
            return d
        if not self.stopped:
            reactor.callWhenRunning(self.dequeue)
        return defer.succeed(None)

    def dequeue(self):
        # Fire one dequeue; the callback decides whether/when to loop.
        d = self.queue.dequeue(self.timeout)
        d.addCallback(self.callback_dequeue)

    def startService(self):
        reactor.callLater(1, self.dequeue)
        service.Service.startService(self)

    def stopService(self):
        service.Service.stopService(self)
        # BUG FIX: was `self.stopped = False`, which kept the dequeue loop
        # rescheduling itself forever after shutdown. Sibling services
        # (TEnqueingService, TInlineDequeingService) set True here.
        self.stopped = True
class TEnqueingService(service.Service, RedisMixin):
    """Twisted service that periodically enqueues `example_func` jobs and
    reports throughput every 1000 successful enqueues.

    :param queue_name: name of the destination queue.
    :param sleep_time: delay between consecutive enqueue attempts.
    """

    def __init__(self, queue_name, sleep_time):
        self.stopped = False
        self.sleep_time = sleep_time
        self.queue = Queue(name=queue_name)
        # Throughput accounting.
        self.enqueued = 0
        self.last_processed = 0
        self.last_time = time()

    def callback_enqueue(self, data):
        # Count the successful enqueue and emit stats every 1000 jobs.
        self.enqueued += 1
        if self.enqueued % 1000 == 0:
            self.last_time, self.last_processed = print_stats(
                self.enqueued, 'Enqueued jobs', self.last_time, self.last_processed)

    def enqueue(self):
        """Enqueue one job and reschedule after `sleep_time` unless stopped."""
        try:
            pending = self.queue.enqueue_call(func=example_func, args=(self.enqueued,))
            pending.addCallback(self.callback_enqueue)
        except Exception as e:
            log.msg('Exception enqueing:', e)
            raise
        finally:
            if not self.stopped:
                reactor.callLater(self.sleep_time, self.enqueue)

    def startService(self):
        reactor.callLater(1, self.enqueue)
        service.Service.startService(self)

    def stopService(self):
        service.Service.stopService(self)
        self.stopped = True
class TInlineDequeingService(service.Service, RedisMixin):
    """Twisted service that drains a queue in a single inlineCallbacks loop,
    performing each fetched job in a worker thread.

    :param queue_name: name of the queue to drain.
    :param blocking_time: blocking timeout passed to each dequeue call.
    """

    def __init__(self, queue_name, blocking_time):
        self.queue = Queue(name=queue_name)
        self.blocking_time = blocking_time
        self.stopped = False

    @defer.inlineCallbacks
    def dequeue(self):
        """Loop until stopped: fetch a job, perform it in a thread.

        Fetch errors (UnpickleError/NoSuchJobError) are logged and skipped;
        any other error is logged and re-raised, ending the loop.
        """
        while not self.stopped:
            # BUG FIX: define `job` before the try so the generic handler's
            # str(job) cannot raise UnboundLocalError when dequeue() itself
            # fails before assignment, masking the real exception.
            job = None
            try:
                job = yield self.queue.dequeue(self.blocking_time)
                if job is not None:
                    yield threads.deferToThread(job.perform)
            except (UnpickleError, NoSuchJobError) as e:
                log.msg('Exception %s fetching job'%(e.__class__.__name__), e)
            except Exception as e:
                log.msg('Exception %s dequeing job %s:'%(e.__class__.__name__, str(job)), e)
                raise

    def startService(self):
        reactor.callLater(1, self.dequeue)
        service.Service.startService(self)

    def stopService(self):
        service.Service.stopService(self)
        self.stopped = True
def notify(self):
    """Sample current Redis metrics: total key count plus per-queue depth.

    Records the database size into ``self._keys`` and, for each tracked
    queue name in ``self._items``, that queue's current job count.
    """
    total_keys = yield self._connection.dbsize()
    self._keys.add(total_keys)
    for queue_name in self._items:
        tracked_queue = Queue(name=queue_name, connection=self._connection)
        depth = yield tracked_queue.count
        self._items[queue_name].add(depth)
def getPendingJobs(self, userid, queue_name):
    """Return the ids of jobs in `queue_name` whose metadata belongs to `userid`.

    Jobs that cannot be fetched are logged and removed from the queue rather
    than aborting the scan.

    :param userid: owner id matched against each job's ``meta['userid']``.
    :param queue_name: name of the queue to inspect.
    :returns: (via defer.returnValue) list of matching job ids.
    """
    queue = Queue(queue_name)
    job_ids = yield queue.job_ids
    matching = []
    for index, job_id in enumerate(job_ids):
        try:
            job = yield Job.fetch(job_id, connection=queue.connection)
            if job.meta.get('userid') == userid:
                matching.append(job_id)
        except Exception as e:
            # Unfetchable job: log with its position and evict it.
            self.log.err(
                e, '[%s] Exception fetching job %s with index %s while getPendingJobs in queue %s' % (userid, job_id, index, queue_name))
            yield queue.remove(job_id)
    defer.returnValue(matching)
def dequeue_any(self):
    """Fetch one job from any of `self.queue_keys` and perform it in a thread.

    Always reschedules itself (via the reactor) unless the service was
    stopped, even when an exception is raised and propagated.
    """
    try:
        res = yield Queue.dequeue_any(queue_keys=self.queue_keys, timeout=1, connection=RedisMixin.redis_conn)
        if res is not None:
            queue, job = res
            log.msg('Fetched job:%s from queue:%s'%(job, queue))
            # Fire-and-forget: the job runs in a thread while we loop on.
            threads.deferToThread(job.perform)
    except Exception as e:
        log.msg('Exception dequeing:', e)
        # BUG FIX: bare `raise` re-raises with the original traceback
        # (`raise e` discarded it in Python 2) and matches the sibling
        # dequeue/enqueue loops in this file.
        raise
    finally:
        if not self.stopped:
            reactor.callWhenRunning(self.dequeue_any)
def dequeue(self):
    """Pop one job id from any watched queue, fetch it, and perform it inline.

    Reschedules itself via the reactor unless stopped; errors are logged and
    re-raised with their original traceback.
    """
    try:
        popped = yield Queue.lpop(self.queue_keys, timeout=1, connection=self.redis_conn)
        if popped is None:
            return
        queue_key, job_id = popped
        log.msg('Job: %s in queue: %s'%(job_id, queue_key))
        fetched = yield Job.fetch(job_id, connection=self.redis_conn)
        if fetched is not None:
            log.msg('Fetch Job:', fetched)
            fetched.perform()
    except Exception as e:
        log.msg('Exception dequeing:', e)
        raise
    finally:
        if not self.stopped:
            reactor.callWhenRunning(self.dequeue)
def wrapped_f(*args, **kwargs):
    """Decorator wrapper: run the method inline or enqueue it as a job.

    Must be invoked as a bound method of a DistributedAPI instance (the
    instance arrives as ``args[0]``). If the instance is already marked
    ``enqueued`` — or no queue name is configured — the wrapped function runs
    directly; otherwise the bound method is enqueued as a job and, when sync
    mode is requested, the job result is awaited with retries.

    :raises TypeError: when not called on a DistributedAPI object.
    :returns: (via defer.returnValue) the function's or job's result, else None.

    NOTE(review): ``self`` here is the enclosing decorator object (closure),
    not the DistributedAPI instance; ``f`` is the decorated function.
    """
    if len(args) == 0 or not isinstance(args[0], DistributedAPI):
        raise TypeError(
            'SynchronousCall must be called with a DistributedAPI object'
        )
    calling_self = args[0]
    args = args[1:]
    # More precedence queue_name of DistributedAPI than decorated method
    queue_name = calling_self.queue_name if calling_self.queue_name else self.queue_name
    ret = None
    if calling_self.enqueued or not queue_name:
        # Already running inside a job (or no queue configured): call inline.
        ret = yield f(calling_self, *args, **kwargs)
    else:
        # Enqueue the *bound* method so the worker re-binds to the instance.
        # NOTE: `func_name` is the Python 2 spelling of __name__.
        function = getattr(calling_self, getattr(f, 'func_name'))
        queue = Queue(queue_name)
        # Mark before enqueuing so the job's own execution takes the inline path.
        calling_self.enqueued = True
        job = yield self._enqueue_job(queue, calling_self.key, function, args, kwargs)
        if self.sync or calling_self.sync:
            ret = yield self._get_result_retry(job)
    defer.returnValue(ret)
def __init__(self, queue_name, timeout):
    """Bind the target queue and store the dequeue timeout.

    :param queue_name: name of the queue to drain.
    :param timeout: blocking timeout for each dequeue call.
    """
    self.stopped = False
    self.timeout = timeout
    self.queue = Queue(name=queue_name)
def __init__(self, queues):
    """Wrap each queue name in a Queue object and cache their Redis keys.

    :param queues: iterable of queue names to watch.
    """
    self.stopped = False
    self.processed = 0
    self.queues = [Queue(queue_name) for queue_name in queues]
    # Redis keys used for multi-queue blocking pops.
    self.queue_keys = [queue.key for queue in self.queues]
def __init__(self, queue_name, blocking_time):
    """Bind the target queue and store the blocking-pop timeout.

    :param queue_name: name of the queue to drain.
    :param blocking_time: blocking timeout for each dequeue call.
    """
    self.stopped = False
    self.blocking_time = blocking_time
    self.queue = Queue(name=queue_name)