Example #1
def test_interrupt_redis_started_jobs(worker):

    worker.start(
        queues="xxx", flags="--config tests/fixtures/config-lostjobs.py")

    worker.send_task("tests.tasks.general.Add", {
                     "a": 41, "b": 1, "sleep": 10}, block=False, queue="xxx")
    worker.send_task("tests.tasks.general.Add", {
                     "a": 41, "b": 1, "sleep": 10}, block=False, queue="xxx")

    time.sleep(3)

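    # Stop the worker while both jobs (sleep=10) are still running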
    worker.stop(deps=False)

    assert Queue("xxx").size() == 0
    assert connections.redis.zcard(Queue.redis_key_started()) == 2

    worker.start(queues="default", start_deps=False, flush=False)

    assert connections.redis.zcard(Queue.redis_key_started()) == 2

    res = worker.send_task("mrq.basetasks.cleaning.RequeueRedisStartedJobs", {
        "timeout": 0
    }, block=True, queue="default")

    assert res["fetched"] == 2
    assert res["requeued"] == 2

    assert Queue("xxx").size() == 2
    assert Queue("default").size() == 0
    assert connections.redis.zcard(Queue.redis_key_started()) == 0
Example #2
def test_known_queues_lifecycle(worker):

    worker.start(queues="default_reverse xtest test_timed_set", flags="--config tests/fixtures/config-raw1.py")
    time.sleep(1)
    # Test known queues
    from mrq.queue import Queue, send_task
    assert set(Queue.redis_known_queues().keys()) == set(["default", "xtest", "test_timed_set"])

    # Try queueing a task
    send_task("tests.tasks.general.Add", {"a": 41, "b": 1, "sleep": 1}, queue="x")
    time.sleep(1)
    assert set(Queue.redis_known_queues().keys()) == set(["x", "default", "xtest", "test_timed_set"])

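    # Backdate the queue's last-used timestamp to 8 days ago, past the 7-day cleaning threshold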
    Queue("x").add_to_known_queues(timestamp=time.time() - (8 * 86400))

    worker.send_task("mrq.basetasks.cleaning.CleanKnownQueues", {}, block=True)

    # Not removed - not empty yet.
    assert set(Queue.redis_known_queues().keys()) == set(["x", "default", "xtest", "test_timed_set"])

    Queue("x").empty()

    # Still not removed.
    assert set(Queue.redis_known_queues().keys()) == set(["x", "default", "xtest", "test_timed_set"])

    worker.send_task("mrq.basetasks.cleaning.CleanKnownQueues", {}, block=True)

    # Now we're good
    assert set(Queue.redis_known_queues().keys()) == set(["default", "xtest", "test_timed_set"])
Example #3
def test_raw_set(worker, has_subqueue, p_queue, p_set):
    flags = "--greenlets 10 --config tests/fixtures/config-raw1.py"
    if has_subqueue:
      flags = "%s --subqueues_refresh_interval=0.1" % flags
      # worker should dequeue all subqueues
      p_queue = "%s/" % p_queue

    worker.start(flags=flags, queues=p_queue)

    if has_subqueue:
        # queue tasks in p_queue/subqueue
        p_queue = "%ssubqueue" % p_queue

    test_collection = worker.mongodb_logs.tests_inserts
    jobs_collection = worker.mongodb_jobs.mrq_jobs

    assert jobs_collection.count() == 0

    assert Queue(p_queue).size() == 0

    worker.send_raw_tasks(p_queue, ["aaa", "bbb", "ccc", "bbb"], block=True)

    assert Queue(p_queue).size() == 0

    if p_set:
        assert jobs_collection.count() == 3
        assert jobs_collection.count({"status": "success"}) == 3

        assert test_collection.count() == 3

    else:
        assert jobs_collection.count() == 4
        assert jobs_collection.count({"status": "success"}) == 4

        assert test_collection.count() == 4
Example #4
def test_retry_from_other_queue_stays_on_queue(worker):
    worker.start(queues="default exec")

    worker.send_task("tests.tasks.general.Retry", {"delay": 1},
                     queue="exec",
                     accept_statuses="retry")

    time.sleep(2)

    job_id = worker.mongodb_jobs.mrq_jobs.find()[0]["_id"]
    job = Job(job_id).fetch()

    assert job.data["status"] == "retry"
    assert job.data["queue"] == "exec"

    assert Queue("default").size() == 0
    assert Queue("exec").size() == 0

    worker.stop(deps=False)

    worker.start(queues="default", deps=False)

    # Should do nothing yet
    worker.send_task("mrq.basetasks.cleaning.RequeueRetryJobs", {}, block=True)

    assert Queue("default").size() == 0
    assert Queue("exec").size() == 1

    job = Job(job_id).fetch()
    assert job.data["status"] == "queued"
    assert job.data["queue"] == "exec"
Example #5
def test_raw_mixed(worker, p_queue, p_greenlets):

    worker.start_deps()

    worker.send_raw_tasks(
        "test_raw", ["aaa", "bbb", "ccc"], start=False, block=False)

    worker.send_task("tests.tasks.general.MongoInsert", {
        "not_raw": "ddd"
    }, start=False, block=False)

    assert Queue("test_raw").size() == 3
    assert Queue("default").size() == 1

    worker.start(flags="--greenlets %s --config tests/fixtures/config-raw1.py" %
                 p_greenlets, queues=p_queue, deps=False)

    test_collection = worker.mongodb_logs.tests_inserts
    jobs_collection = worker.mongodb_jobs.mrq_jobs

    time.sleep(1)

    assert Queue("test_raw").size() == 0
    assert Queue("default").size() == 0

    assert test_collection.count() == 4
    assert jobs_collection.count() == 4
    assert jobs_collection.find({"status": "success"}).count() == 4

    assert list(jobs_collection.find({"status": "success"}))[0]["worker"]
Example #6
def test_interrupt_worker_double_sigint(worker, p_flags):
    """ Test what happens when we interrupt a running worker with 2 SIGINTs. """

    start_time = time.time()

    worker.start(flags=p_flags)

    job_id = worker.send_task(
        "tests.tasks.general.Add", {"a": 41, "b": 1, "sleep": 10}, block=False)

    time.sleep(1)

    job = Job(job_id).fetch().data
    assert job["status"] == "started"

    # Stop the worker gracefully. The first job should still finish!
    worker.stop(block=False, deps=False)

    time.sleep(1)

    # Should not be accepting new jobs!
    job_id2 = worker.send_task(
        "tests.tasks.general.Add", {"a": 42, "b": 1, "sleep": 10}, block=False)

    time.sleep(1)

    job = Job(job_id2).fetch().data
    assert job.get("status") == "queued"

    # Sending a second kill -2 should make it stop
    worker.stop(block=True, deps=False, force=True)

    time.sleep(1)

    job = Job(job_id).fetch().data
    assert job["status"] == "interrupt"

    assert time.time() - start_time < 8

    # Then try the cleaning task that requeues interrupted jobs

    assert Queue("default").size() == 1

    worker.start(queues="cleaning", deps=False, flush=False)

    res = worker.send_task(
        "mrq.basetasks.cleaning.RequeueInterruptedJobs", {}, block=True, queue="cleaning")

    assert res["requeued"] == 1

    assert Queue("default").size() == 2

    Queue("default").list_job_ids() == [str(job_id2), str(job_id)]

    job = Job(job_id).fetch().data
    assert job["status"] == "queued"
    assert job["queue"] == "default"
Example #7
def test_pause_subqueue(worker):

    # set config in current context in order to have a subqueue delimiter
    set_current_config(get_config(config_type="worker"))

    worker.start(
        queues="high high/",
        flags="--subqueues_refresh_interval=1 --paused_queues_refresh_interval=1")

    Queue("high").pause()

    assert Queue("high/").is_paused()

    # wait for the paused_queues list to be refreshed
    time.sleep(2)

    job_id1 = send_task("tests.tasks.general.MongoInsert", {"a": 41},
                        queue="high")

    job_id2 = send_task("tests.tasks.general.MongoInsert", {"a": 43},
                        queue="high/subqueue")

    # wait a bit to make sure the jobs status will still be queued
    time.sleep(5)

    job1 = Job(job_id1).fetch().data
    job2 = Job(job_id2).fetch().data

    assert job1["status"] == "queued"
    assert job2["status"] == "queued"

    assert worker.mongodb_jobs.tests_inserts.count() == 0

    Queue("high/").resume()

    Job(job_id1).wait(poll_interval=0.01)

    Job(job_id2).wait(poll_interval=0.01)

    job1 = Job(job_id1).fetch().data
    job2 = Job(job_id2).fetch().data

    assert job1["status"] == "success"
    assert job1["result"] == {"a": 41}

    assert job2["status"] == "success"
    assert job2["result"] == {"a": 43}

    assert worker.mongodb_jobs.tests_inserts.count() == 2

    worker.stop()
Example #8
def test_raw_remove(worker, p_queue):

    worker.start_deps()

    worker.send_raw_tasks(p_queue, ["aa", "bb", "cc"],
                          block=False,
                          start=False)

    assert Queue(p_queue).size() == 3

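    # remove_raw_jobs deletes queued raw jobs matching the given payloads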
    Queue(p_queue).remove_raw_jobs(["aa", "cc"])

    assert Queue(p_queue).size() == 1
Example #9
def test_retry_otherqueue_delay_zero(worker):

    # delay = 0 should requeue right away.
    job_id = worker.send_task("tests.tasks.general.Retry", {
        "queue": "noexec",
        "delay": 0
    }, block=False)

    time.sleep(1)

    assert worker.mongodb_logs.tests_inserts.find().count() == 1

    assert Queue("default").size() == 0
    assert Queue("noexec").size() == 1
    assert Queue("noexec").list_job_ids() == [str(job_id)]
Example #10
def test_worker_crash(worker):
    """ Test that when a worker crashes its running jobs are requeued """

    worker.start(queues="default")
    worker.send_task("tests.tasks.general.Add", {
        "a": 41,
        "b": 1,
        "sleep": 10
    },
                     block=False,
                     queue="default")

    time.sleep(5)

    worker.stop(block=True, sig=9, deps=False)

    time.sleep(1)

    # simulate worker crash
    worker.mongodb_jobs.mrq_workers.delete_many({})
    worker.start(queues="cleaning", deps=False, flush=False)

    res = worker.send_task("mrq.basetasks.cleaning.RequeueStartedJobs",
                           {"timeout": 90},
                           block=True,
                           queue="cleaning")

    assert res["requeued"] == 1
    assert res["started"] == 2
    assert Queue("default").size() == 1
Example #11
    def run(self, params):
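        """ Resets the crawler state: drops all stored URLs and empties the "crawl" queue. """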

        collection = connections.mongodb_jobs.simple_crawler_urls

        collection.delete_many({})

        Queue("crawl").empty()
Example #12
    def run(self, params):

        max_age = int(params.get("max_age") or (7 * 86400))
        pretend = bool(params.get("pretend"))
        check_mongo = bool(params.get("check_mongo"))

        known_queues = Queue.redis_known_queues()

        removed_queues = []

        queues_from_config = Queue.all_known_from_config()

        print "Found %s known queues & %s from config" % (
            len(known_queues), len(queues_from_config))

        # Only clean queues older than N days
        time_threshold = time.time() - max_age
        for queue, time_last_used in known_queues.items():
            if queue in queues_from_config:
                continue
            if time_last_used < time_threshold:
                q = Queue(queue, add_to_known_queues=False)
                size = q.size()
                if check_mongo:
                    size += connections.mongodb_jobs.mrq_jobs.count(
                        {"queue": queue})
                if size == 0:
                    removed_queues.append(queue)
                    print "Removing empty queue '%s' from known queues ..." % queue
                    if not pretend:
                        q.remove_from_known_queues()

        print "Cleaned %s queues" % len(removed_queues)

        return removed_queues
Example #13
def test_interrupt_redis_flush(worker):
    """ Test what happens when we flush redis after queueing jobs.

        The RequeueLostJobs task should put them back in redis.
    """

    worker.start(queues="cleaning", deps=True, flush=True)

    job_id1 = worker.send_task("tests.tasks.general.Add", {
                               "a": 41, "b": 1, "sleep": 10}, block=False, queue="default")
    job_id2 = worker.send_task("tests.tasks.general.Add", {
                               "a": 41, "b": 1, "sleep": 10}, block=False, queue="default")
    job_id3 = worker.send_task("tests.tasks.general.Add", {
                               "a": 41, "b": 1, "sleep": 10}, block=False, queue="otherq")

    assert Queue("default").size() == 2
    assert Queue("otherq").size() == 1

    res = worker.send_task(
        "mrq.basetasks.cleaning.RequeueLostJobs", {}, block=True, queue="cleaning")

    # We should check the first job only, and stop as soon as
    # we see it is still in the queue.
    assert res["fetched"] == 1
    assert res["requeued"] == 0

    assert Queue("default").size() == 2
    assert Queue("otherq").size() == 1

    # Then flush redis!
    worker.fixture_redis.flush()

    # Assert the queues are empty.
    assert Queue("default").size() == 0
    assert Queue("otherq").size() == 0

    res = worker.send_task(
        "mrq.basetasks.cleaning.RequeueLostJobs", {}, block=True, queue="cleaning")

    assert res["fetched"] == 3
    assert res["requeued"] == 3

    assert Queue("default").size() == 2
    assert Queue("otherq").size() == 1

    assert Queue("default").list_job_ids() == [str(job_id1), str(job_id2)]
    assert Queue("otherq").list_job_ids() == [str(job_id3)]
Example #14
def build_api_datatables_query(req):
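    """ Builds a MongoDB query dict for the jobs DataTable from the HTTP request arguments. """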
    query = {}

    if req.args.get("redisqueue"):
        query["_id"] = {
            "$in": [
                ObjectId(x)
                for x in Queue(req.args.get("redisqueue")).list_job_ids(
                    limit=1000)
            ]
        }
    else:

        for param in ["queue", "path", "exceptiontype"]:
            if req.args.get(param):
                if "*" in req.args[param]:
                    regexp = "^%s$" % req.args[param].replace("*", ".*")
                    query[param] = {"$regex": regexp}
                else:
                    query[param] = req.args[param]

        if req.args.get("queue") and req.args["queue"].endswith("/"):
            subqueues = Queue(req.args["queue"]).get_known_subqueues()
            query["queue"] = {"$in": list(subqueues)}

        if req.args.get("status"):
            statuses = req.args["status"].split("-")
            if len(statuses) == 1:
                query["status"] = statuses[0]
            else:
                query["status"] = {"$in": statuses}
        if req.args.get("id"):
            query["_id"] = ObjectId(req.args.get("id"))
        if req.args.get("worker"):
            query["worker"] = ObjectId(req.args.get("worker"))

        if req.args.get("params"):
            try:
                params_dict = json.loads(req.args.get("params"))

                for key in params_dict:
                    query["params.%s" % key] = params_dict[key]
            except Exception as e:  # pylint: disable=broad-except
                print("Error will converting form JSON: %s" % e)

    return query
Example #15
    def run(self, params):

        # If there are more than this many items on the queue, we don't try to check
        # whether our mongodb jobs are still queued.
        max_queue_items = params.get("max_queue_items", 1000)

        stats = {"fetched": 0, "requeued": 0}

        all_queues = Queue.all_known()

        for queue_name in all_queues:

            queue = Queue(queue_name)
            queue_size = queue.size()

            if queue.is_raw:
                continue

            log.info("Checking queue %s" % queue_name)

            if queue_size > max_queue_items:
                log.info("Stopping because queue %s has %s items" %
                         (queue_name, queue_size))
                continue

            queue_jobs_ids = set(queue.list_job_ids(limit=max_queue_items + 1))
            if len(queue_jobs_ids) >= max_queue_items:
                log.info(
                    "Stopping because queue %s actually had more than %s items"
                    % (queue_name, len(queue_jobs_ids)))
                continue

            for job_data in connections.mongodb_jobs.mrq_jobs.find(
                {
                    "queue": queue_name,
                    "status": "queued"
                },
                    projection={
                        "_id": 1
                    }).sort([["_id", 1]]):

                stats["fetched"] += 1

                if str(job_data["_id"]) in queue_jobs_ids:
                    log.info("Found job %s on queue %s. Stopping" %
                             (job_data["_id"], queue.id))
                    break

                # At this point, this job is not on the queue and we're sure
                # the queue is less than max_queue_items
                # We can safely requeue the job.
                log.info("Requeueing %s on %s" % (job_data["_id"], queue.id))

                stats["requeued"] += 1
                job = Job(job_data["_id"])
                job.requeue(queue=queue_name)

        return stats
Example #16
def test_raw_exception(worker):

    p_queue = "testexception_raw"

    worker.start(
        flags="--greenlets 10 --config tests/fixtures/config-raw1.py", queues=p_queue)

    jobs_collection = worker.mongodb_jobs.mrq_jobs
    assert jobs_collection.count() == 0
    assert Queue(p_queue).size() == 0

    worker.send_raw_tasks(p_queue, ["msg1"], block=True)

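    # The raw job raised an exception, so it was recorded as a failed mrq job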
    failjob = list(jobs_collection.find())[0]

    assert Queue("default").size() == 0
    assert Queue(p_queue).size() == 0
    assert jobs_collection.count() == 1
    assert failjob["status"] == "failed"

    worker.stop(deps=False)

    worker.start(
        deps=False, flags="--greenlets 10 --config tests/fixtures/config-raw1.py", queues="default")

    worker.send_task(
        "mrq.basetasks.utils.JobAction",
        {
            "id": failjob["_id"],
            "action": "requeue"
        },
        block=True
    )

    assert Queue("default").size() == 0
    assert Queue(p_queue).size() == 0
    assert Queue("testx").size() == 1
    assert jobs_collection.count() == 2
    assert list(jobs_collection.find({"_id": failjob["_id"]}))[
        0]["status"] == "queued"
    assert list(jobs_collection.find({"_id": {"$ne": failjob["_id"]}}))[
        0]["status"] == "success"

    worker.stop(deps=False)

    worker.start(
        deps=False, flags="--greenlets 10 --config tests/fixtures/config-raw1.py", queues="default testx")

    time.sleep(2)

    assert Queue(p_queue).size() == 0
    assert jobs_collection.count() == 2
    assert Queue("testx").size() == 0
    assert list(jobs_collection.find({"_id": failjob["_id"]}))[
        0]["status"] == "failed"
Example #17
def test_retry_max_retries(worker):

    # Task has maxretries=1
    worker.start(flags="--config tests/fixtures/config-retry1.py")

    worker.send_task("tests.tasks.general.Retry", {},
                     block=True,
                     accept_statuses=["retry"])

    assert Queue("default").size() == 0

    job_id = worker.mongodb_jobs.mrq_jobs.find()[0]["_id"]

    job = Job(job_id).fetch()
    assert job.data["status"] == "retry"
    assert job.data["retry_count"] == 1

    time.sleep(2)

    # Should requeue
    worker.send_task("mrq.basetasks.cleaning.RequeueRetryJobs", {}, block=True)

    time.sleep(2)

    assert Queue("default").size() == 0

    job = Job(job_id).fetch()
    assert job.data["status"] == "maxretries"
    assert job.data["retry_count"] == 1

    # Then, manual requeue from the dashboard should reset the retry_count field.
    params = {
        "action": "requeue",
        "status": "maxretries",
        "destination_queue": "noexec"
    }

    worker.send_task("mrq.basetasks.utils.JobAction", params, block=True)

    job = Job(job_id).fetch()
    assert job.data["status"] == "queued"
    assert job.data["queue"] == "noexec"
    assert job.data["retry_count"] == 0
Example #18
def test_pause_resume(worker):

    worker.start(flags="--paused_queues_refresh_interval=0.1")

    Queue("high").pause()

    assert Queue("high").is_paused()

    # wait for the paused_queues list to be refreshed
    time.sleep(2)

    job_id1 = send_task("tests.tasks.general.MongoInsert", {"a": 41},
                        queue="high")

    job_id2 = send_task("tests.tasks.general.MongoInsert", {"a": 43},
                        queue="low")

    time.sleep(5)

    job1 = Job(job_id1).fetch().data
    job2 = Job(job_id2).fetch().data

    assert job1["status"] == "queued"

    assert job2["status"] == "success"
    assert job2["result"] == {"a": 43}

    assert worker.mongodb_jobs.tests_inserts.count() == 1

    Queue("high").resume()

    Job(job_id1).wait(poll_interval=0.01)

    job1 = Job(job_id1).fetch().data

    assert job1["status"] == "success"
    assert job1["result"] == {"a": 41}

    assert worker.mongodb_jobs.tests_inserts.count() == 2

    worker.stop()
Example #19
def test_pause_refresh_interval(worker):
    """ Tests that a refresh interval of 0 disables the pause functionnality """

    worker.start(flags="--paused_queues_refresh_interval=0")

    Queue("high").pause()

    assert Queue("high").is_paused()

    # wait for the paused_queues list to be refreshed
    time.sleep(2)

    job_id1 = send_task("tests.tasks.general.MongoInsert", {"a": 41},
                        queue="high")

    time.sleep(5)

    job1 = Job(job_id1).fetch().data

    assert job1["status"] == "success"
    assert job1["result"] == {"a": 41}
Example #20
def test_raw_retry(worker):

    p_queue = "testretry_raw"

    worker.start(flags="--greenlets 10 --config tests/fixtures/config-raw1.py",
                 queues=p_queue)

    jobs_collection = worker.mongodb_jobs.mrq_jobs
    assert jobs_collection.count() == 0
    assert Queue(p_queue).size() == 0

    worker.send_raw_tasks(p_queue, [0], block=True)

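    # The raw job requested a retry and was requeued as a regular job on "testx" (per the fixture config)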
    failjob = list(jobs_collection.find())[0]

    assert Queue("testx").size() == 1
    assert Queue("default").size() == 0
    assert Queue(p_queue).size() == 0
    assert jobs_collection.count() == 1
    assert failjob["status"] == "queued"
    assert failjob["queue"] == "testx"
Example #21
def test_retry_otherqueue_delay_nonzero(worker):

    # delay = 2: the job should only be requeued once the delay has passed.
    worker.send_task("tests.tasks.general.Retry", {
        "queue": "noexec",
        "delay": 2
    }, block=True, accept_statuses=["retry"])

    assert Queue("default").size() == 0
    assert Queue("noexec").size() == 0

    job_id = worker.mongodb_jobs.mrq_jobs.find()[0]["_id"]

    job = Job(job_id).fetch()
    assert job.data["status"] == "retry"

    # Should do nothing yet
    worker.send_task("mrq.basetasks.cleaning.RequeueRetryJobs", {}, block=True)

    assert Queue("default").size() == 0
    assert Queue("noexec").size() == 0

    time.sleep(2)

    # Should requeue
    worker.send_task("mrq.basetasks.cleaning.RequeueRetryJobs", {}, block=True)

    assert Queue("default").size() == 0
    assert Queue("noexec").size() == 1

    job = Job(job_id).fetch()
    assert job.data["status"] == "queued"
Example #22
    def run(self, params):

        redis_key_started = Queue.redis_key_started()

        stats = {
            "fetched": 0,
            "requeued": 0
        }

        # Fetch all the jobs started more than a minute ago - they should not
        # be in redis:started anymore
        job_ids = connections.redis.zrangebyscore(
            redis_key_started, "-inf", time.time() - params.get("timeout", 60))

        # TODO this should be wrapped inside Queue or Worker
        # we shouldn't access these internals here
        queue_obj = Queue("default")
        unserialized_job_ids = queue_obj.unserialize_job_ids(job_ids)

        for i, job_id in enumerate(job_ids):

            queue = Job(unserialized_job_ids[i], start=False, fetch=False).fetch(
                full_data=True).data["queue"]

            queue_obj = Queue(queue)

            stats["fetched"] += 1

            log.info("Requeueing %s on %s" % (unserialized_job_ids[i], queue))

            # TODO LUA script & don't rpush if not in zset anymore.
            with connections.redis.pipeline(transaction=True) as pipeline:
                pipeline.zrem(redis_key_started, job_id)
                pipeline.rpush(queue_obj.redis_key, job_id)
                pipeline.execute()

            stats["requeued"] += 1

        return stats
Example #23
    def run(self, params):

        max_age = int(params.get("max_age") or (7 * 86400))

        known_queues = Queue.redis_known_queues()

        # Only clean queues older than N days
        time_threshold = time.time() - max_age
        for queue, time_last_used in known_queues.items():
            if time_last_used < time_threshold:
                q = Queue(queue, add_to_known_queues=False)
                if q.size() == 0:
                    q.remove_from_known_queues()
Example #24
def test_abort(worker):

    worker.start()

    worker.send_task("tests.tasks.general.Abort", {"a": 41},
                     accept_statuses=["abort"])

    assert Queue("default").size() == 0

    db_jobs = list(worker.mongodb_jobs.mrq_jobs.find())
    assert len(db_jobs) == 1

    assert db_jobs[0]["status"] == "abort"
Example #25
def test_interrupt_maxjobs(worker):

    # The worker will stop after doing 5 jobs
    worker.start(flags="--max_jobs 5 --greenlets 2", queues="test1 default")

    worker.send_tasks("tests.tasks.general.Add", [
        {"a": i, "b": 1, "sleep": 0}
        for i in range(12)
    ], block=False)

    time.sleep(2)

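    # 12 jobs were queued and the worker exited after processing 5, so 7 remain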
    assert Queue("default").size() == 7
Example #26
    def run(self, params):

        collection = connections.mongodb_jobs.mrq_jobs

        # If there are more than this many items on the queue, we don't try to check
        # whether our mongodb jobs are still queued.
        max_queue_items = params.get("max_queue_items", 1000)

        stats = {"fetched": 0, "requeued": 0}

        for job_data in collection.find(
                {"status": "queued"},
                projection={"_id": 1, "queue": 1}).sort([("_id", 1)]):

            stats["fetched"] += 1

            queue = Queue(job_data["queue"])
            queue_size = queue.size()
            if queue_size > max_queue_items:
                log.info("Stopping because queue %s has %s items" %
                         (queue, queue_size))
                break

            queue_jobs_ids = set(queue.list_job_ids(limit=max_queue_items + 1))
            if len(queue_jobs_ids) >= max_queue_items:
                log.info(
                    "Stopping because queue %s actually had more than %s items"
                    % (queue, len(queue_jobs_ids)))
                break

            if str(job_data["_id"]) in queue_jobs_ids:
                log.info("Stopping because we found job %s in redis" %
                         job_data["_id"])
                break

            # At this point, this job is not on the queue and we're sure
            # the queue is less than max_queue_items
            # We can safely requeue the job.
            log.info("Requeueing %s on %s" % (job_data["_id"], queue.id))

            stats["requeued"] += 1
            job = Job(job_data["_id"])
            job.requeue(queue=job_data["queue"])

        return stats
Example #27
def test_sorted_graph(worker):

    p_queue = "test_sorted_set"

    worker.start_deps()

    assert Queue(p_queue).size() == 0

    worker.send_raw_tasks(p_queue, {
        "000": -1,
        "aaa": 1,
        "aaa2": 1.5,
        "bbb": 2,
        "ccc": 4
    }, start=False, block=False)
    time.sleep(0.5)

    assert Queue(p_queue).size() == 5
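    # get_sorted_graph counts members in `slices` equal score ranges between the
    # given bounds; include_inf adds below-min and above-max counts at each end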
    assert Queue(p_queue).get_sorted_graph(
        1, 4, slices=3, include_inf=True) == [1, 2, 1, 0, 1]
    assert Queue(p_queue).get_sorted_graph(
        1, 4, slices=3, include_inf=False) == [2, 1, 0]

    worker.stop_deps()
Example #28
def test_abort(worker):

    worker.start()

    worker.send_task("tests.tasks.general.Abort", {"a": 41}, accept_statuses=["abort"])

    assert Queue("default").size() == 0

    db_jobs = list(worker.mongodb_jobs.mrq_jobs.find())
    assert len(db_jobs) == 1

    job = db_jobs[0]
    assert job["status"] == "abort"
    assert job.get("dateexpires") is not None
    assert job["dateexpires"] < datetime.utcnow() + timedelta(hours=24)
Example #29
    def send_raw_tasks(self, queue, params_list, start=True, block=True):
        if not self.started and start:
            self.start()

        queue_raw_jobs(queue, params_list)

        if block:
            # Wait for the queue to be empty. Might be error-prone when tasks
            # are in-memory between the two checks.
            q = Queue(queue)
            while q.size() > 0 or self.mongodb_jobs.mrq_jobs.find({"status": "started"}).count() > 0:
                # print "S", q.size(),
                # self.mongodb_jobs.mrq_jobs.find({"status":
                # "started"}).count()
                time.sleep(0.1)
Example #30
def test_retry_max_retries_zero(worker):

    # The config sets maxretries=1, but this task overrides it with max_retries=0
    worker.start(flags="--config tests/fixtures/config-retry1.py")

    worker.send_task("tests.tasks.general.Retry", {"max_retries": 0},
                     block=True,
                     accept_statuses=["maxretries"])

    assert Queue("default").size() == 0

    job_id = worker.mongodb_jobs.mrq_jobs.find()[0]["_id"]

    job = Job(job_id).fetch()
    assert job.data["status"] == "maxretries"