class NSQProxy:
    """Republish incoming NSQ messages to a pool of downstream nsqd writers.

    NOTE(review): ``HostPool``, ``Writer``, and ``Error`` come from elsewhere
    in the project; pool failover semantics are assumed from their names.
    """

    def __init__(self, topic, nsqds):
        self.topic = topic
        # One Writer per nsqd endpoint, rotated/health-tracked via HostPool.
        self.writer_pool = HostPool([Writer([nsqd]) for nsqd in nsqds])

    def relay(self, nsq_message):
        """Asynchronously publish ``nsq_message.body`` to ``self.topic``.

        The message is finished/requeued later from the pub callback.
        """
        nsq_message.enable_async()
        writer = self.writer_pool.get()
        callback = functools.partial(self._on_message_response,
                                     nsq_message=nsq_message,
                                     writer=writer)
        writer.pub(self.topic, nsq_message.body, callback)

    def _on_message_response(self, conn, data, nsq_message, writer):
        """Finish or requeue the source message based on the pub result."""
        if not isinstance(data, Error):
            # Publish succeeded: mark the writer healthy and ack the message.
            self.writer_pool.success(writer)
            nsq_message.finish()
            return
        logging.warning("requeuing message: %s", nsq_message.body)
        self.writer_pool.failed(writer)
        nsq_message.requeue()
class NSQProxy:
    """Proxy that forwards NSQ messages onto ``topic`` via a writer pool."""

    def __init__(self, topic, nsqds):
        self.topic = topic
        writers = [Writer([nsqd]) for nsqd in nsqds]
        self.writer_pool = HostPool(writers)

    def relay(self, nsq_message):
        """Publish the message body to our topic; completion is deferred
        to ``_on_message_response`` via the pub callback."""
        nsq_message.enable_async()
        writer = self.writer_pool.get()
        on_response = functools.partial(
            self._on_message_response, nsq_message=nsq_message, writer=writer)
        writer.pub(self.topic, nsq_message.body, on_response)

    def _on_message_response(self, conn, data, nsq_message, writer):
        """Callback for ``Writer.pub``: ack on success, requeue on error."""
        failed = isinstance(data, Error)
        if failed:
            logging.warning("requeuing message: %s", nsq_message.body)
            self.writer_pool.failed(writer)
            nsq_message.requeue()
        else:
            self.writer_pool.success(writer)
            nsq_message.finish()
def __init__(self, simplequeue_address, all_tasks, max_tries=5,
             sleeptime_failed_queue=5, sleeptime_queue_empty=0.5,
             sleeptime_requeue=1, requeue_delay=90, mget_items=0,
             failed_count=0, queuename=None, preprocess_method=None,
             validate_method=None, requeue_giveup=None,
             failed_message_dir=None):
    """
    BaseReader provides a queue that calls each task in ``all_tasks`` up to
    ``max_tries`` times, requeueing on any failure with increasing multiples
    of ``requeue_delay`` between subsequent tries of each message.

    ``preprocess_method`` optional callable that may alter the message data
    before the task functions run.

    ``validate_method`` optional callable returning a boolean: whether or not
    a given message should be processed.

    ``all_tasks`` mapping of task name -> callable; each callable is invoked
    individually with the message data.

    ``requeue_giveup`` callback invoked once a message has been attempted
    ``max_tries`` times.

    ``failed_message_dir`` directory where failed messages are written
    (a temp dir is created if not supplied).
    """
    assert isinstance(all_tasks, dict)
    for task_name, task_fn in all_tasks.items():
        assert callable(task_fn), "key %s must have a callable value" % task_name
    # The two optional hooks get the same check, in the original order.
    for optional_fn in (preprocess_method, validate_method):
        if optional_fn:
            assert callable(optional_fn)
    assert isinstance(queuename, (str, unicode))
    assert isinstance(mget_items, int)

    # Normalize the address argument into a HostPool: a bare string becomes
    # a one-element list; lists/sets/tuples are wrapped as-is.
    if not isinstance(simplequeue_address, HostPool):
        if isinstance(simplequeue_address, (str, unicode)):
            simplequeue_address = [simplequeue_address]
        assert isinstance(simplequeue_address, (list, set, tuple))
        simplequeue_address = HostPool(simplequeue_address)
    self.simplequeue_address = simplequeue_address

    self.max_tries = max_tries
    self.requeue_giveup = requeue_giveup
    self.mget_items = mget_items
    self.sleeptime_failed_queue = sleeptime_failed_queue
    self.sleeptime_queue_empty = sleeptime_queue_empty
    self.sleeptime_requeue = sleeptime_requeue
    # seconds; max total delay is requeue_delay * (max_tries + max_tries-1
    # + max_tries-2 ... 1)
    self.requeue_delay = requeue_delay

    self.failed_message_dir = failed_message_dir or tempfile.mkdtemp()
    assert os.access(self.failed_message_dir, os.W_OK)
    self.failed_count = failed_count
    self.queuename = queuename

    self.task_lookup = all_tasks
    self.preprocess_method = preprocess_method
    self.validate_method = validate_method
    # One backoff timer per task, plus a default one for preprocessing.
    self.backoff_timer = {name: BackoffTimer.BackoffTimer(0, 120)
                          for name in self.task_lookup}
    self.backoff_timer['__preprocess'] = BackoffTimer.BackoffTimer(0, 120)
    self.quit_flag = False
def __init__(self, topic, nsqds):
    """Remember the target topic and build a pooled Writer per nsqd."""
    self.topic = topic
    writers = [Writer([nsqd]) for nsqd in nsqds]
    self.writer_pool = HostPool(writers)