def process(self):
    """Load the module containing your class, and run the appropriate method. For example, if this job was popped from the queue ``testing``, then this would invoke the ``testing`` staticmethod of your class."""
    # Resolve the handler: prefer a method named after the queue, falling
    # back to a generic ``process`` staticmethod if one exists.
    try:
        method = getattr(self.klass, self.queue_name, getattr(self.klass, "process", None))
    except Exception as exc:
        # We failed to import the module containing this class
        logger.exception("Failed to import %s" % self.klass_name)
        return self.fail(self.queue_name + "-" + exc.__class__.__name__, "Failed to import %s" % self.klass_name)

    # Guard: no handler at all for this queue
    if not method:
        logger.error(
            'Failed %s : %s is missing a method "%s" or "process"' % (self.jid, self.klass_name, self.queue_name)
        )
        self.fail(
            self.queue_name + "-method-missing",
            self.klass_name + ' is missing a method "' + self.queue_name + '" or "process"',
        )
        return

    # Guard: the attribute exists but is not a plain (static) function
    if not isinstance(method, types.FunctionType):
        logger.error("Failed %s in %s : %s is not static" % (self.jid, self.queue_name, repr(method)))
        self.fail(self.queue_name + "-method-type", repr(method) + " is not static")
        return

    # Run the job; any exception becomes a qless failure whose group name
    # encodes the queue and the exception class.
    try:
        logger.info("Processing %s in %s" % (self.jid, self.queue_name))
        method(self)
        logger.info("Completed %s in %s" % (self.jid, self.queue_name))
    except Exception as exc:
        # Make error type based on exception type
        logger.exception("Failed %s in %s: %s" % (self.jid, self.queue_name, repr(method)))
        self.fail(self.queue_name + "-" + exc.__class__.__name__, traceback.format_exc())
def foo(job):
    '''Dummy job: sleep for ``job.data['sleep']`` seconds (default 0), then
    complete the job, logging (but not propagating) any completion failure.

    :param job: a qless job; reads ``job.data``, ``job.jid``, calls
        ``job.complete()``.
    '''
    time.sleep(job.data.get('sleep', 0))
    try:
        job.complete()
    # Was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt; narrow to Exception so signals still propagate.
    except Exception:
        logger.exception('Unable to complete job %s' % job.jid)
def listen(self, listener):
    '''Listen for events that affect our ownership of a job.

    Consumes pubsub messages from ``listener.listen()``; when an event
    indicates the job is no longer ours ('canceled', 'lock_lost', 'put'),
    kill the sandboxed child working on that jid.

    :param listener: an object whose ``listen()`` yields pubsub messages
        with a JSON string under the ``'data'`` key.
    '''
    for message in listener.listen():
        try:
            data = json.loads(message['data'])
            if data['event'] in ('canceled', 'lock_lost', 'put'):
                self.kill(data['jid'])
        # Was a bare ``except:``; narrowed so SystemExit/KeyboardInterrupt
        # can still unwind the listener loop. Malformed messages are
        # logged and skipped (best-effort, as before).
        except Exception:
            logger.exception('Pubsub error')
def handler(self, signum, frame):  # pragma: no cover
    '''Signal handler for this process'''
    # Only shutdown-type signals are acted upon; anything else is ignored.
    if signum not in (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
        return
    # Forward the signal to every sandboxed child, then exit ourselves.
    for child in self.sandboxes.keys():
        try:
            os.kill(child, signum)
        except OSError:  # pragma: no cover
            logger.exception('Failed to send %s to %s...' % (signum, child))
    exit(0)
def handler(self, signum, frame):  # pragma: no cover
    '''Signal handler for this process'''
    shutdown_signals = (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT)
    if signum in shutdown_signals:
        # Relay the shutdown signal to each sandboxed child before exiting.
        for pid in list(self.sandboxes.keys()):
            try:
                os.kill(pid, signum)
            except OSError:  # pragma: no cover
                logger.exception(
                    'Failed to send %s to %s...' % (signum, pid))
        exit(0)
def stop(self, sig=signal.SIGINT):
    '''Stop all the workers, and then wait for them.

    Sends ``sig`` to every sandboxed child, then reaps each one with
    ``waitpid``, removing it from ``self.sandboxes``.

    Fixes over the previous version:
    - ``os.kill`` is wrapped in try/except: a child that already died
      would raise OSError and abort the whole shutdown.
    - Both loops iterate over a snapshot (``list(...)``): we pop from the
      dict inside the loop, which raises RuntimeError on a live view in
      Python 3.
    - The ``finally`` pops ``cpid`` (the key we iterated), not ``pid``,
      which was unbound when ``waitpid`` raised before assignment.
    '''
    for cpid in list(self.sandboxes):
        logger.warn('Stopping %i...' % cpid)
        try:
            os.kill(cpid, sig)
        except OSError:  # pragma: no cover
            logger.exception('Error stopping %s...' % cpid)
    # While we still have children running, wait for them
    for cpid in list(self.sandboxes):
        try:
            logger.info('Waiting for %i...' % cpid)
            pid, status = os.waitpid(cpid, 0)
            logger.warn('%i stopped with status %i' % (pid, status >> 8))
        except OSError:  # pragma: no cover
            logger.exception('Error waiting for %i...' % cpid)
        finally:
            self.sandboxes.pop(cpid, None)
def run(self):
    '''Run this worker: fork one child per configured worker count, then
    supervise them, respawning any child that dies until shutdown.

    Fixes over the previous version:
    - child-side bare ``except:`` narrowed to ``except Exception:`` (the
      ``finally: os._exit(0)`` still guarantees the child never escapes);
    - ``self.sandboxes.pop(pid)`` no longer KeyErrors (and thereby kills
      the supervise loop) when ``os.wait`` reaps a pid we do not track.
    '''
    self.signals(('TERM', 'INT', 'QUIT'))
    # Divide up the jobs that we have to divy up between the workers. This
    # produces evenly-sized groups of jobs
    resume = self.divide(self.resume, self.count)
    for index in range(self.count):
        # The sandbox for the child worker
        sandbox = os.path.join(os.getcwd(), 'qless-py-workers', 'sandbox-%s' % index)
        cpid = os.fork()
        if cpid:
            logger.info('Spawned worker %i' % cpid)
            self.sandboxes[cpid] = sandbox
        else:  # pragma: no cover
            # Move to the sandbox as the current working directory
            with Worker.sandbox(sandbox):
                os.chdir(sandbox)
                try:
                    self.spawn(resume=resume[index], sandbox=sandbox).run()
                except Exception:
                    logger.exception('Exception in spawned worker')
                finally:
                    # The child must never return into the parent's code
                    os._exit(0)
    try:
        while not self.shutdown:
            pid, status = os.wait()
            logger.warn('Worker %i died with status %i from signal %i' % (pid, status >> 8, status & 0xff))
            sandbox = self.sandboxes.pop(pid, None)
            if sandbox is None:
                # Reaped a child we did not spawn (or already removed);
                # nothing to restart
                continue
            cpid = os.fork()
            if cpid:
                logger.info('Spawned replacement worker %i' % cpid)
                self.sandboxes[cpid] = sandbox
            else:  # pragma: no cover
                with Worker.sandbox(sandbox):
                    os.chdir(sandbox)
                    try:
                        self.spawn(sandbox=sandbox).run()
                    except Exception:
                        logger.exception('Exception in spawned worker')
                    finally:
                        os._exit(0)
    finally:
        # Whatever takes us out of the loop, take the children with us
        self.stop(signal.SIGKILL)
def run(self):
    '''Run this worker.

    Forks ``self.count`` children (each in its own sandbox directory) and
    then supervises them: whenever a child dies, its sandbox is handed to
    a freshly-forked replacement, until ``self.shutdown`` is set. On exit,
    all children are killed via ``self.stop(signal.SIGKILL)``.

    Fixes over the previous version:
    - child-side bare ``except:`` narrowed to ``except Exception:``
      (``finally: os._exit(0)`` still prevents the child from returning);
    - an untracked pid from ``os.wait`` no longer raises KeyError out of
      the supervise loop.
    '''
    self.signals(('TERM', 'INT', 'QUIT'))
    # Divide up the jobs that we have to divy up between the workers. This
    # produces evenly-sized groups of jobs
    resume = self.divide(self.resume, self.count)
    for index in range(self.count):
        # The sandbox for the child worker
        sandbox = os.path.join(
            os.getcwd(), 'qless-py-workers', 'sandbox-%s' % index)
        cpid = os.fork()
        if cpid:
            logger.info('Spawned worker %i' % cpid)
            self.sandboxes[cpid] = sandbox
        else:  # pragma: no cover
            # Move to the sandbox as the current working directory
            with Worker.sandbox(sandbox):
                os.chdir(sandbox)
                try:
                    self.spawn(resume=resume[index], sandbox=sandbox).run()
                except Exception:
                    logger.exception('Exception in spawned worker')
                finally:
                    os._exit(0)
    try:
        while not self.shutdown:
            pid, status = os.wait()
            logger.warn('Worker %i died with status %i from signal %i' % (
                pid, status >> 8, status & 0xff))
            sandbox = self.sandboxes.pop(pid, None)
            if sandbox is None:
                # We don't track this pid — nothing to respawn
                continue
            cpid = os.fork()
            if cpid:
                logger.info('Spawned replacement worker %i' % cpid)
                self.sandboxes[cpid] = sandbox
            else:  # pragma: no cover
                with Worker.sandbox(sandbox):
                    os.chdir(sandbox)
                    try:
                        self.spawn(sandbox=sandbox).run()
                    except Exception:
                        logger.exception('Exception in spawned worker')
                    finally:
                        os._exit(0)
    finally:
        self.stop(signal.SIGKILL)
def stop(self, sig=signal.SIGINT):
    '''Stop all the workers, and then wait for them.

    Sends ``sig`` to every sandboxed child, then reaps each with
    ``waitpid`` and removes it from ``self.sandboxes``.

    Fix: the reaping loop pops from ``self.sandboxes`` in its ``finally``
    while iterating, which raises ``RuntimeError: dictionary changed size
    during iteration`` on a live ``.keys()`` view in Python 3 — iterate
    over a snapshot of the keys instead.
    '''
    for cpid in list(self.sandboxes):
        logger.warn('Stopping %i...' % cpid)
        try:
            os.kill(cpid, sig)
        except OSError:  # pragma: no cover
            logger.exception('Error stopping %s...' % cpid)

    # While we still have children running, wait for them
    for cpid in list(self.sandboxes):
        try:
            logger.info('Waiting for %i...' % cpid)
            pid, status = os.waitpid(cpid, 0)
            logger.warn('%i stopped with status %i' % (pid, status >> 8))
        except OSError:  # pragma: no cover
            logger.exception('Error waiting for %i...' % cpid)
        finally:
            self.sandboxes.pop(cpid, None)
def jobs(self):
    '''Generator for all the jobs'''
    # If we should resume work, hand those jobs out first — but only if we
    # can still renew our lock on them via a heartbeat
    for resumed in self.resume:
        try:
            if resumed.heartbeat():
                yield resumed
        except exceptions.LostLockException:
            logger.exception('Cannot resume %s' % resumed.jid)
    # Then poll the queues forever; a full pass with nothing popped yields
    # None so the caller can idle
    while True:
        found_any = False
        for queue in self.queues:
            popped = queue.pop()
            if popped:
                found_any = True
                yield popped
        if not found_any:
            yield None
def process(self):
    '''Load the module containing your class, and run the appropriate method. For example, if this job was popped from the queue ``testing``, then this would invoke the ``testing`` staticmethod of your class.'''
    try:
        method = getattr(self.klass, self.queue_name, getattr(self.klass, 'process', None))
    except Exception as exc:
        # We failed to import the module containing this class
        logger.exception('Failed to import %s' % self.klass_name)
        # Bug fix: this must return. Previously execution fell through to
        # ``if method:`` with ``method`` unbound, raising NameError instead
        # of leaving the job cleanly failed.
        return self.fail(self.queue_name + '-' + exc.__class__.__name__,
                         'Failed to import %s' % self.klass_name)
    if method:
        if isinstance(method, types.FunctionType):
            try:
                logger.info('Processing %s in %s' % (self.jid, self.queue_name))
                method(self)
                logger.info('Completed %s in %s' % (self.jid, self.queue_name))
            except Exception as e:
                # Make error type based on exception type
                logger.exception('Failed %s in %s: %s' % (self.jid, self.queue_name, repr(method)))
                self.fail(self.queue_name + '-' + e.__class__.__name__,
                          traceback.format_exc())
        else:
            # Or fail with a message to that effect
            logger.error('Failed %s in %s : %s is not static' % (self.jid, self.queue_name, repr(method)))
            self.fail(self.queue_name + '-method-type', repr(method) + ' is not static')
    else:
        # Or fail with a message to that effect
        logger.error(
            'Failed %s : %s is missing a method "%s" or "process"' %
            (self.jid, self.klass_name, self.queue_name))
        self.fail(
            self.queue_name + '-method-missing',
            self.klass_name + ' is missing a method "' +
            self.queue_name + '" or "process"')
def stop(self, sig=signal.SIGINT):
    '''Stop all the workers, and then wait for them'''
    # First, signal every sandboxed child (best-effort: a child may
    # already be gone)
    for child in self.sandboxes:
        logger.warn('Stopping %i...' % child)
        try:
            os.kill(child, sig)
        except OSError:  # pragma: no cover
            logger.exception('Error stopping %s...' % child)

    # Then reap each one. We mutate the dict inside the loop, so iterate
    # over a snapshot of its keys.
    for child in list(self.sandboxes):
        try:
            logger.info('Waiting for %i...' % child)
            reaped, status = os.waitpid(child, 0)
            logger.warn('%i stopped with status %i' % (reaped, status >> 8))
        except OSError:  # pragma: no cover
            logger.exception('Error waiting for %i...' % child)
        finally:
            self.sandboxes.pop(child, None)