class ResultProducer(multiprocessing.Process):
    """Consume worker results from a queue and persist task state to Redis.

    Runs as a separate process. Each item pulled from ``result_queue`` is
    expected to be a dict describing one finished unit of work; the task's
    since/max ids are updated, then the task is either flagged as errored
    or transitioned to the reported state.
    """

    def __init__(self, result_queue):
        """Bind the shared result queue and set up logging / Redis access.

        :param result_queue: multiprocessing queue of result dicts produced
            by worker processes.
        """
        super(ResultProducer, self).__init__()
        self.result_queue = result_queue
        # child logger scoped to this component
        self.log = properties.logger.getChild("result_producer")
        # Redis-backed task queue used to persist per-task state updates
        self.tq = RedisTaskQueue()

    def run(self):
        """Loop forever, applying each queued result to the task store."""
        self.log.info("will produce results")
        while True:
            try:
                # blocks until a worker publishes a result
                result = self.result_queue.get()
                # lazy %-args: formatting is deferred until the record is
                # actually emitted (idiomatic logging, same output text)
                self.log.info("will produce task result: [%s]", result)
                task_id = result.get("task_id")
                new_since_id = result.get("since_id")
                new_max_id = result.get("max_id")
                # fall back to WORK when the worker reported no state
                new_state = result.get("state", WORK)
                error = result.get("error")
                self.tq.update_task_info(task_id, new_since_id, new_max_id)
                if error:
                    error_detail = result.get("error_detail")
                    self.tq.set_task_error(task_id, error_detail)
                    self.log.info("set task to error!\n%s", task_id)
                else:
                    self.tq.set_task_state(task_id, new_state)
                    self.log.info("set task [%s] \n%s", task_id, new_state)
            except Exception as e:
                # broad catch is deliberate: the producer loop must survive a
                # malformed result; log with traceback and keep consuming
                self.log.exception(e)
def __init__(self, result_queue):
    # NOTE(review): this looks like a duplicated fragment of
    # ResultProducer.__init__ defined elsewhere in this file, and its
    # enclosing class header is not in view — confirm whether this copy
    # is dead code that should be removed.
    super(ResultProducer, self).__init__()
    # queue from which task results will be consumed
    self.result_queue = result_queue
    # child logger scoped to this component
    self.log = properties.logger.getChild("result_producer")
    # Redis-backed task queue used to persist per-task state updates
    self.tq = RedisTaskQueue()
source_post = message.get("re_post_info") if source_post: result["re_post_info"] = process_callback(context, source_post) return result if __name__ == '__main__': iteration_sleep_time = 5 watcher = WorkedWatcher(iteration_sleep_time * 2) log = properties.logger.getChild('main') prepare_fast_db_handler() tq = RedisTaskQueue(redis.Redis()) tq.restore_queue() load_credentials() result_queue = Queue() tasks_queue = Queue() ResultProducer(result_queue).start() ch = CredentialsHandler(refresh=True) retrievers = [] for i in xrange(properties.count_processes): rtr = RetrieverWorker(tasks_queue, process_callback, result_queue, str(i)) # rtr.daemon = True retrievers.append(rtr)