def test_broker(monkeypatch):
    """Smoke-test the base Broker API and its stats helpers.

    Drives every queue-facing operation once, checks that stats round-trip
    through the configured cache, then verifies that stat reads/writes
    degrade to silent no-ops when the cache backend is unconfigured.
    """
    broker = Broker()

    # Drive every queue-facing method once; none should raise.
    operations = [
        ('enqueue', ('test',)),
        ('dequeue', ()),
        ('queue_size', ()),
        ('lock_size', ()),
        ('purge_queue', ()),
        ('delete', ('id',)),
        ('delete_queue', ()),
        ('acknowledge', ('test',)),
        ('ping', ()),
        ('info', ()),
        ('close', ()),
    ]
    for method, args in operations:
        getattr(broker, method)(*args)

    # stats: values round-trip through the cache backend
    assert broker.get_stat('test_1') is None
    broker.set_stat('test_1', 'test', 3)
    assert broker.get_stat('test_1') == 'test'
    assert broker.get_stats('test:*')[0] == 'test'

    # stats with no cache: reads return None, writes are dropped
    monkeypatch.setattr(Conf, 'CACHE', 'not_configured')
    broker.cache = broker.get_cache()
    assert broker.get_stat('test_1') is None
    broker.set_stat('test_1', 'test', 3)
    assert broker.get_stat('test_1') is None
    assert broker.get_stats('test:*') is None
def test_broker():
    """Smoke-test the base Broker API and its stats helpers.

    Drives every queue-facing operation once, checks that stats round-trip
    through the configured cache, then verifies that stat reads/writes
    degrade to silent no-ops when the cache backend is unconfigured.

    Fix: ``Conf.CACHE`` is now restored in a ``finally`` block with its
    original value. The previous version restored it only on the success
    path (a failing assertion leaked ``'not_configured'`` into later tests)
    and hard-coded ``'default'`` instead of the prior setting.
    """
    broker = Broker()
    broker.enqueue('test')
    broker.dequeue()
    broker.queue_size()
    broker.lock_size()
    broker.purge_queue()
    broker.delete('id')
    broker.delete_queue()
    broker.acknowledge('test')
    broker.ping()
    broker.info()
    # stats: values round-trip through the cache backend
    assert broker.get_stat('test_1') is None
    broker.set_stat('test_1', 'test', 3)
    assert broker.get_stat('test_1') == 'test'
    assert broker.get_stats('test:*')[0] == 'test'
    # stats with no cache: reads return None, writes are dropped
    original_cache = Conf.CACHE
    Conf.CACHE = 'not_configured'
    try:
        broker.cache = broker.get_cache()
        assert broker.get_stat('test_1') is None
        broker.set_stat('test_1', 'test', 3)
        assert broker.get_stat('test_1') is None
        assert broker.get_stats('test:*') is None
    finally:
        # always undo the global config mutation, even if an assert failed
        Conf.CACHE = original_cache
def monitor(result_queue: Queue, broker: Broker = None):
    """
    Gets finished tasks from the result queue and saves them to Django.

    Blocks on ``result_queue`` until the "STOP" sentinel arrives; for each
    task package it saves the result, acknowledges it with the broker when
    appropriate, and logs the outcome.

    :type broker: brokers.Broker
    :type result_queue: multiprocessing.Queue
    """
    if not broker:
        # fall back to the default broker when none is injected
        broker = get_broker()
    name = current_process().name
    logger.info(_(f"{name} monitoring at {current_process().pid}"))
    # iter() with a sentinel: loop until the literal "STOP" marker is queued
    for task in iter(result_queue.get, "STOP"):
        # save the result (must happen before ack_id is popped below,
        # since the save path may still read the full task package)
        if task.get("cached", False):
            save_cached(task, broker)
        else:
            save_task(task, broker)
        # acknowledge result — only on success, unless the task explicitly
        # opted in to acknowledging failures via "ack_failure"
        ack_id = task.pop("ack_id", False)
        if ack_id and (task["success"] or task.get("ack_failure", False)):
            broker.acknowledge(ack_id)
        # log the result
        if task["success"]:
            # log success
            logger.info(_(f"Processed [{task['name']}]"))
        else:
            # log failure
            logger.error(_(f"Failed [{task['name']}] - {task['result']}"))
    logger.info(_(f"{name} stopped monitoring results"))
def test_broker():
    """Smoke-test the base Broker API and its stats helpers.

    Drives every queue-facing operation once, checks that stats round-trip
    through the configured cache, then verifies that stat reads/writes
    degrade to silent no-ops when the cache backend is unconfigured.

    Fix: ``Conf.CACHE`` is now restored in a ``finally`` block with its
    original value. The previous version restored it only on the success
    path (a failing assertion leaked ``"not_configured"`` into later tests)
    and hard-coded ``"default"`` instead of the prior setting.
    """
    broker = Broker()
    broker.enqueue("test")
    broker.dequeue()
    broker.queue_size()
    broker.lock_size()
    broker.purge_queue()
    broker.delete("id")
    broker.delete_queue()
    broker.acknowledge("test")
    broker.ping()
    broker.info()
    # stats: values round-trip through the cache backend
    assert broker.get_stat("test_1") is None
    broker.set_stat("test_1", "test", 3)
    assert broker.get_stat("test_1") == "test"
    assert broker.get_stats("test:*")[0] == "test"
    # stats with no cache: reads return None, writes are dropped
    original_cache = Conf.CACHE
    Conf.CACHE = "not_configured"
    try:
        broker.cache = broker.get_cache()
        assert broker.get_stat("test_1") is None
        broker.set_stat("test_1", "test", 3)
        assert broker.get_stat("test_1") is None
        assert broker.get_stats("test:*") is None
    finally:
        # always undo the global config mutation, even if an assert failed
        Conf.CACHE = original_cache
def save_task(task, broker: Broker): """ Saves the task package to Django or the cache :param task: the task package :type broker: brokers.Broker """ # SAVE LIMIT < 0 : Don't save success if not task.get("save", Conf.SAVE_LIMIT >= 0) and task["success"]: return # enqueues next in a chain if task.get("chain", None): django_q.tasks.async_chain( task["chain"], group=task["group"], cached=task["cached"], sync=task["sync"], broker=broker, ) # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning close_old_django_connections() try: with db.transaction.atomic(): last = Success.objects.select_for_update().last() if task["success"] and 0 < Conf.SAVE_LIMIT <= Success.objects.count(): last.delete() # check if this task has previous results if Task.objects.filter(id=task["id"], name=task["name"]).exists(): existing_task = Task.objects.get(id=task["id"], name=task["name"]) # only update the result if it hasn't succeeded yet if not existing_task.success: existing_task.stopped = task["stopped"] existing_task.result = task["result"] existing_task.success = task["success"] existing_task.attempt_count = existing_task.attempt_count + 1 existing_task.save() if Conf.MAX_ATTEMPTS > 0 and existing_task.attempt_count >= Conf.MAX_ATTEMPTS: broker.acknowledge(task['ack_id']) else: Task.objects.create( id=task["id"], name=task["name"], func=task["func"], hook=task.get("hook"), args=task["args"], kwargs=task["kwargs"], started=task["started"], stopped=task["stopped"], result=task["result"], group=task.get("group"), success=task["success"], attempt_count=1 ) except Exception as e: logger.error(e)
def test_broker(monkeypatch):
    """Smoke-test the base Broker API and its stats helpers.

    Drives every queue-facing operation once, checks that stats round-trip
    through the configured cache, then verifies that stat reads/writes
    degrade to silent no-ops when the cache backend is unconfigured.
    """
    broker = Broker()

    # Drive every queue-facing method once; none should raise.
    operations = [
        ("enqueue", ("test",)),
        ("dequeue", ()),
        ("queue_size", ()),
        ("lock_size", ()),
        ("purge_queue", ()),
        ("delete", ("id",)),
        ("delete_queue", ()),
        ("acknowledge", ("test",)),
        ("ping", ()),
        ("info", ()),
    ]
    for method, args in operations:
        getattr(broker, method)(*args)

    # stats: values round-trip through the cache backend
    assert broker.get_stat("test_1") is None
    broker.set_stat("test_1", "test", 3)
    assert broker.get_stat("test_1") == "test"
    assert broker.get_stats("test:*")[0] == "test"

    # stats with no cache: reads return None, writes are dropped
    monkeypatch.setattr(Conf, "CACHE", "not_configured")
    broker.cache = broker.get_cache()
    assert broker.get_stat("test_1") is None
    broker.set_stat("test_1", "test", 3)
    assert broker.get_stat("test_1") is None
    assert broker.get_stats("test:*") is None