Example #1
def test_retry_from_other_queue_stays_on_queue(worker):
    worker.start(queues="default exec")

    worker.send_task("tests.tasks.general.Retry", {"delay": 1},
                     queue="exec",
                     accept_statuses="retry")

    time.sleep(2)

    job_id = worker.mongodb_jobs.mrq_jobs.find()[0]["_id"]
    job = Job(job_id).fetch()

    assert job.data["status"] == "retry"
    assert job.data["queue"] == "exec"

    assert Queue("default").size() == 0
    assert Queue("exec").size() == 0

    worker.stop(deps=False)

    worker.start(queues="default", deps=False)

    # The retry delay has already elapsed, so this should requeue the job on "exec"
    worker.send_task("mrq.basetasks.cleaning.RequeueRetryJobs", {}, block=True)

    assert Queue("default").size() == 0
    assert Queue("exec").size() == 1

    job = Job(job_id).fetch()
    assert job.data["status"] == "queued"
    assert job.data["queue"] == "exec"
Example #2
def test_queue_notify(worker, worker2):

    worker.start(flags="--max_latency 30 --config tests/fixtures/config-notify.py", queues="withnotify withoutnotify")

    # Used to queue jobs in the same environment & config!
    worker2.start(flags="--config tests/fixtures/config-notify.py")

    id1 = worker2.send_task("tests.tasks.general.SendTask", {
        "params": {"a": 42, "b": 1},
        "path": "tests.tasks.general.Add",
        "queue": "withnotify"
    })

    time.sleep(2)

    assert Job(id1).fetch().data["status"] == "success"
    assert Job(id1).fetch().data["result"] == 43

    id2 = worker2.send_task("tests.tasks.general.SendTask", {
        "params": {"a": 43, "b": 1},
        "path": "tests.tasks.general.Add",
        "queue": "withoutnotify"
    })

    time.sleep(2)

    assert Job(id2).fetch().data["status"] == "queued"
Example #3
def test_interrupt_maxconcurrency(worker):

    # The worker will raise a maxconcurrency on the second job
    worker.start(flags="--greenlets=2")

    job_ids = worker.send_tasks("tests.tasks.concurrency.LockedAdd",
                                [{
                                    "a": i,
                                    "b": 1,
                                    "sleep": 2
                                } for i in range(2)],
                                block=False)

    worker.wait_for_tasks_results(
        job_ids, accept_statuses=["success", "failed", "maxconcurrency"])
    job_statuses = [Job(job_id).fetch().data["status"] for job_id in job_ids]
    assert set(job_statuses) == set(["success", "maxconcurrency"])

    # the job concurrency key must be equal to 0
    last_job_id = worker.send_task("tests.tasks.concurrency.LockedAdd", {
        "a": 1,
        "b": 1,
        "sleep": 2
    },
                                   block=False)

    last_job = Job(last_job_id).wait(poll_interval=0.01)
    assert last_job.get("status") == "success"
Example #4
def test_interrupt_worker_gracefully(worker, p_flags):
    """ Test what happens when we interrupt a running worker gracefully. """

    worker.start(flags=p_flags)

    job_id = worker.send_task(
        "tests.tasks.general.Add", {"a": 41, "b": 1, "sleep": 5}, block=False)

    time.sleep(2)

    job = Job(job_id).fetch().data
    assert job["status"] == "started"

    # Stop the worker gracefully. The first job should still finish!
    worker.stop(block=False, deps=False)

    time.sleep(1)

    # Should not be accepting new jobs!
    job_id2 = worker.send_task(
        "tests.tasks.general.Add", {"a": 42, "b": 1, "sleep": 4}, block=False)

    time.sleep(1)

    job = Job(job_id2).fetch().data
    assert job.get("status") == "queued"

    time.sleep(4)

    job = Job(job_id).fetch().data
    assert job["status"] == "success"
    assert job["result"] == 42

    job = Job(job_id2).fetch().data
    assert job.get("status") == "queued"
Example #5
def test_retry_otherqueue_delay_nonzero(worker):

    # With delay=2, the job should only be requeued after the delay has passed.
    worker.send_task("tests.tasks.general.Retry", {
        "queue": "noexec",
        "delay": 2
    },
                     block=True,
                     accept_statuses=["retry"])

    assert Queue("default").size() == 0
    assert Queue("noexec").size() == 0

    job_id = worker.mongodb_jobs.mrq_jobs.find()[0]["_id"]

    job = Job(job_id).fetch()
    assert job.data["status"] == "retry"

    # Should do nothing yet
    worker.send_task("mrq.basetasks.cleaning.RequeueRetryJobs", {}, block=True)

    assert Queue("default").size() == 0
    assert Queue("noexec").size() == 0

    time.sleep(2)

    # Should requeue
    worker.send_task("mrq.basetasks.cleaning.RequeueRetryJobs", {}, block=True)

    assert Queue("default").size() == 0
    assert Queue("noexec").size() == 1

    job = Job(job_id).fetch()
    assert job.data["status"] == "queued"
Example #6
def test_cancel_by_path(worker):

  # Start the worker with only one greenlet so that tasks execute sequentially
  worker.start(flags="--gevent 1")

  job_id1 = worker.send_task("tests.tasks.general.MongoInsert", {"a": 41, "sleep": 2}, block=False)

  worker.send_task("mrq.basetasks.utils.JobAction", {
    "path": "tests.tasks.general.MongoInsert",
    "status": "queued",
    "action": "cancel"
  }, block=False)

  job_id2 = worker.send_task("tests.tasks.general.MongoInsert", {"a": 43}, block=False)

  Job(job_id2).wait(poll_interval=0.01)

  # Leave some time to unqueue job_id2 without executing.
  time.sleep(1)
  worker.stop(deps=False)

  job1 = Job(job_id1).fetch().data
  job2 = Job(job_id2).fetch().data

  assert job1["status"] == "success"
  assert job1["result"] == {"a": 41, "sleep": 2}

  assert job2["status"] == "cancel"
  assert job2["dateexpires"] > job2["dateupdated"]

  assert job2.get("result") is None

  assert worker.mongodb_logs.tests_inserts.count() == 1

  assert Queue("default").size() == 0
Example #7
def test_interrupt_worker_double_sigint(worker, p_flags):
    """ Test what happens when we interrupt a running worker with 2 SIGINTs. """

    start_time = time.time()

    worker.start(flags=p_flags)

    job_id = worker.send_task("tests.tasks.general.Add", {"a": 41, "b": 1, "sleep": 20}, block=False)

    while Job(job_id).fetch().data["status"] == "queued":
        time.sleep(0.1)

    job = Job(job_id).fetch().data
    assert job["status"] == "started"

    # Stop the worker gracefully. The first job should still finish!
    worker.stop(block=False, deps=False)

    time.sleep(1)

    # Should not be accepting new jobs!
    job_id2 = worker.send_task("tests.tasks.general.Add", {"a": 42, "b": 1, "sleep": 20}, block=False)

    time.sleep(1)

    job2 = Job(job_id2).fetch().data
    assert job2.get("status") == "queued"

    job = Job(job_id).fetch().data
    assert job["status"] == "started"

    # Sending a second kill -2 should make it stop
    worker.stop(block=True, deps=False, force=True)

    while Job(job_id).fetch().data["status"] == "started":
        time.sleep(0.1)

    job = Job(job_id).fetch().data
    assert job["status"] == "interrupt"

    assert time.time() - start_time < 15

    # Then try the cleaning task that requeues interrupted jobs

    assert Queue("default").size() == 1

    worker.start(queues="cleaning", deps=False, flush=False)

    res = worker.send_task("mrq.basetasks.cleaning.RequeueInterruptedJobs", {}, block=True, queue="cleaning")

    assert res["requeued"] == 1

    assert Queue("default").size() == 2

    Queue("default").list_job_ids() == [str(job_id2), str(job_id)]

    job = Job(job_id).fetch().data
    assert job["status"] == "queued"
    assert job["queue"] == "default"
Example #8
File: cleaning.py Project: leezqcst/mrq
    def run(self, params):

        # If there are more than this many items on the queue, we don't try to check
        # if our mongodb jobs are still queued.
        max_queue_items = params.get("max_queue_items", 1000)

        stats = {"fetched": 0, "requeued": 0}

        all_queues = Queue.all_known()

        for queue_name in all_queues:

            queue = Queue(queue_name)
            queue_size = queue.size()

            if queue.is_raw:
                continue

            log.info("Checking queue %s" % queue_name)

            if queue_size > max_queue_items:
                log.info("Stopping because queue %s has %s items" %
                         (queue_name, queue_size))
                continue

            queue_jobs_ids = set(queue.list_job_ids(limit=max_queue_items + 1))
            if len(queue_jobs_ids) >= max_queue_items:
                log.info(
                    "Stopping because queue %s actually had more than %s items"
                    % (queue_name, len(queue_jobs_ids)))
                continue

            for job_data in connections.mongodb_jobs.mrq_jobs.find(
                {
                    "queue": queue_name,
                    "status": "queued"
                },
                    projection={
                        "_id": 1
                    }).sort([["_id", 1]]):

                stats["fetched"] += 1

                if str(job_data["_id"]) in queue_jobs_ids:
                    log.info("Found job %s on queue %s. Stopping" %
                             (job_data["_id"], queue.id))
                    break

                # At this point, this job is not on the queue and we're sure
                # the queue is less than max_queue_items
                # We can safely requeue the job.
                log.info("Requeueing %s on %s" % (job_data["_id"], queue.id))

                stats["requeued"] += 1
                job = Job(job_data["_id"])
                job.requeue(queue=queue_name)

        return stats
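
A hedged usage sketch for the task above. Judging by the file name, this run() appears to implement a cleaning task along the lines of mrq.basetasks.cleaning.RequeueLostJobs (the task path is an assumption); it can be triggered like the other cleaning tasks in these examples:

# Assumed task path; block=True returns the stats dict built by run() above.
stats = worker.send_task("mrq.basetasks.cleaning.RequeueLostJobs",
                         {"max_queue_items": 500}, block=True)
assert set(stats.keys()) == {"fetched", "requeued"}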
Example #9
    def run(self, params):

        # If there are more than this many items on the queue, we don't try to check
        # if our mongodb jobs are still queued.
        max_queue_items = params.get("max_queue_items", 1000)

        stats = {"fetched": 0, "requeued": 0}

        # This was only checking in Redis and wasn't resistant to a redis-wide flush.
        # Doing Queue.all() is slower but covers more edge cases.
        # all_queues = Queue.all_known()

        all_queues = Queue.all()

        log.info("Checking %s queues" % len(all_queues))

        for queue_name in all_queues:

            queue = Queue(queue_name)
            queue_size = queue.size()

            # If the queue is raw, the jobs were only stored in redis so they are lost for good.
            if queue.is_raw:
                continue

            log.info("Checking queue %s" % queue_name)

            if queue_size > max_queue_items:
                log.info("Stopping because queue %s has %s items" % (queue_name, queue_size))
                continue

            queue_jobs_ids = set(queue.list_job_ids(limit=max_queue_items + 1))
            if len(queue_jobs_ids) >= max_queue_items:
                log.info(
                    "Stopping because queue %s actually had more than %s items" % (queue_name, len(queue_jobs_ids))
                )
                continue

            for job_data in connections.mongodb_jobs.mrq_jobs.find(
                {"queue": queue_name, "status": "queued"}, projection={"_id": 1}
            ).sort([["_id", 1]]):

                stats["fetched"] += 1

                if str(job_data["_id"]) in queue_jobs_ids:
                    log.info("Found job %s on queue %s. Stopping" % (job_data["_id"], queue.id))
                    break

                # At this point, this job is not on the queue and we're sure
                # the queue is less than max_queue_items
                # We can safely requeue the job.
                log.info("Requeueing %s on %s" % (job_data["_id"], queue.id))

                stats["requeued"] += 1
                job = Job(job_data["_id"])
                job.requeue(queue=queue_name)

        return stats
Example #10
File: test_retry.py Project: nfredrik/mrq
def test_retry(worker):

  job_id = worker.send_task("mrq.basetasks.tests.general.Retry", {"queue": "noexec", "countdown": 60}, block=False)

  job_data = Job(job_id).wait(poll_interval=0.01, full_data=True)

  assert job_data["queue"] == "noexec"
  assert job_data["status"] == "retry"
  assert job_data["dateretry"] > datetime.datetime.utcnow()
  assert job_data.get("result") is None
Example #11
File: app.py Project: PUNTOZERO/imgfab
def get_job():

    job_id = request.args['job_id']

    job = Job(job_id)
    job.fetch()

    if job.data["params"].get("user"):
        if not g.user.is_authenticated() or (job.data["params"].get("user") != str(g.user.id)):
            return "Unauthorized."

    return json.dumps({k: v for k, v in job.data.iteritems() if k in ("status", "result")})
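
Note that this snippet targets Python 2: dict.iteritems() was removed in Python 3. A sketch of a Python 3 equivalent, assuming the same Flask globals and job schema:

import json

from flask import g, request
from mrq.job import Job

def get_job_py3():
    # Hypothetical Python 3 port of get_job() above: same authorization
    # check, with dict.items() replacing the removed dict.iteritems().
    job = Job(request.args["job_id"])
    job.fetch()
    user = job.data["params"].get("user")
    if user and (not g.user.is_authenticated() or user != str(g.user.id)):
        return "Unauthorized."
    return json.dumps({k: v for k, v in job.data.items()
                       if k in ("status", "result")})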
Example #12
def test_interrupt_worker_double_sigint(worker, p_flags):
    """ Test what happens when we interrupt a running worker with 2 SIGINTs. """

    start_time = time.time()

    worker.start(flags=p_flags)

    job_id = worker.send_task(
        "tests.tasks.general.Add", {"a": 41, "b": 1, "sleep": 10}, block=False)

    time.sleep(1)

    job = Job(job_id).fetch().data
    assert job["status"] == "started"

    # Stop the worker gracefully. The first job should still finish!
    worker.stop(block=False, deps=False)

    time.sleep(1)

    # Should not be accepting new jobs!
    job_id2 = worker.send_task(
        "tests.tasks.general.Add", {"a": 42, "b": 1, "sleep": 10}, block=False)

    time.sleep(1)

    job = Job(job_id2).fetch().data
    assert job.get("status") == "queued"

    # Sending a second kill -2 should make it stop
    worker.stop(block=True, deps=False, force=True)

    time.sleep(1)

    job = Job(job_id).fetch().data
    assert job["status"] == "interrupt"

    assert time.time() - start_time < 8

    # Then try the cleaning task that requeues interrupted jobs

    assert Queue("default").size() == 1

    worker.start(queues="cleaning", deps=False, flush=False)

    res = worker.send_task(
        "mrq.basetasks.cleaning.RequeueInterruptedJobs", {}, block=True, queue="cleaning")

    assert res["requeued"] == 1

    assert Queue("default").size() == 2

    Queue("default").list_job_ids() == [str(job_id2), str(job_id)]

    job = Job(job_id).fetch().data
    assert job["status"] == "queued"
    assert job["queue"] == "default"
Example #13
File: test_retry.py Project: nfredrik/mrq
def test_retry_cancel_on_retry(worker):

  job_id = worker.send_task("mrq.basetasks.tests.general.Retry", {
    "queue": "noexec",
    "countdown": 60,
    "cancel_on_retry": True
  }, block=False)

  job_data = Job(job_id).wait(poll_interval=0.01, full_data=True)

  assert job_data["status"] == "cancel"
  assert job_data["queue"] == "default"
  assert job_data.get("result") is None
Example #14
File: conftest.py Project: bossjones/mrq
  def wait_for_tasks_results(self, job_ids, block=True, accept_statuses=["success"]):

    if not block:
      return job_ids

    results = []

    for job_id in job_ids:
      job = Job(job_id).wait(poll_interval=0.01)
      assert job.get("status") in accept_statuses, "Job had status %s, not in %s. Dump: %s" % (job.get("status"), accept_statuses, job)

      results.append(job.get("result"))

    return results
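
A short usage sketch for this helper, assuming the same worker fixture: enqueue a batch without blocking, then collect the results while also tolerating failed jobs.

# Enqueue three Add jobs, then block until each reaches an accepted status.
job_ids = worker.send_tasks("tests.tasks.general.Add",
                            [{"a": i, "b": 1} for i in range(3)],
                            block=False)
results = worker.wait_for_tasks_results(
    job_ids, accept_statuses=["success", "failed"])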
Example #15
def test_retry(worker):

    job_id = worker.send_task("tests.tasks.general.Retry", {
        "queue": "noexec",
        "delay": 60
    },
                              block=False)

    job_data = Job(job_id).wait(poll_interval=0.01, full_data=True)

    assert job_data["queue"] == "noexec"
    assert job_data["status"] == "retry"
    assert job_data["dateretry"] > datetime.datetime.utcnow()
    assert job_data.get("result") is None
Example #16
File: test_cli.py Project: bossjones/mrq
def test_cli_run_nonblocking(worker):

  worker.start_deps()

  job_id1 = worker.send_task_cli("tests.tasks.general.Add", {"a": 41, "b": 1}, block=False)

  job1 = Job(job_id1).fetch()

  job1.wait(poll_interval=0.01)

  job1.fetch()

  assert job1.data["status"] == "success"
  assert job1.data["result"] == 42
Example #17
    def run(self, params):

        collection = connections.mongodb_jobs.mrq_jobs

        # If there are more than this many items on the queue, we don't try to check
        # if our mongodb jobs are still queued.
        max_queue_items = params.get("max_queue_items", 1000)

        stats = {"fetched": 0, "requeued": 0}

        for job_data in collection.find({
                "status": "queued"
        },
                                        fields={
                                            "_id": 1,
                                            "queue": 1
                                        }).sort([("_id", 1)]):

            stats["fetched"] += 1

            queue = Queue(job_data["queue"])
            queue_size = queue.size()
            if queue_size > max_queue_items:
                log.info("Stopping because queue %s has %s items" %
                         (queue, queue_size))
                break

            queue_jobs_ids = set(queue.list_job_ids(limit=max_queue_items + 1))
            if len(queue_jobs_ids) >= max_queue_items:
                log.info(
                    "Stopping because queue %s actually had more than %s items"
                    % (queue, len(queue_jobs_ids)))
                break

            if str(job_data["_id"]) in queue_jobs_ids:
                log.info("Stopping because we found job %s in redis" %
                         job_data["_id"])
                break

            # At this point, this job is not on the queue and we're sure
            # the queue is less than max_queue_items
            # We can safely requeue the job.
            log.info("Requeueing %s on %s" % (job_data["_id"], queue.id))

            stats["requeued"] += 1
            job = Job(job_data["_id"])
            job.requeue(queue=job_data["queue"])

        return stats
Example #18
    def wait_for_tasks_results(self, job_ids, block=True, accept_statuses=["success"]):

        if not block:
            return job_ids

        results = []

        for job_id in job_ids:
            job = Job(job_id).wait(poll_interval=0.01)
            assert job.get("status") in accept_statuses, "Job had status %s, not in %s. Dump: %s" % (
                job.get("status"), accept_statuses, job)

            results.append(job.get("result"))

        return results
Example #19
File: app.py Project: yazz-xebu-xyz/imgfab
def get_job():

    job_id = request.args['job_id']

    job = Job(job_id)
    job.fetch()

    if job.data["params"].get("user"):
        if not g.user.is_authenticated() or (job.data["params"].get("user") !=
                                             str(g.user.id)):
            return "Unauthorized."

    return json.dumps(
        {k: v
         for k, v in job.data.iteritems() if k in ("status", "result")})
Example #20
def test_interrupt_worker_sigterm(worker, p_flags):
    """ Test what happens when we interrupt a running worker with 1 SIGTERM.

        We should have had time to mark the task as 'interrupt' so that we can restart it somewhere else right away.
    """

    start_time = time.time()

    worker.start(flags=p_flags)

    job_id = worker.send_task("tests.tasks.general.Add", {
        "a": 41,
        "b": 1,
        "sleep": 10
    },
                              block=False)

    time.sleep(1)

    worker.stop(block=True, sig=15, deps=False)

    time.sleep(2)

    job = Job(job_id).fetch().data
    assert job["status"] == "interrupt"

    assert time.time() - start_time < 6

    worker.stop_deps()
Example #21
def test_disconnects_service_during_task(worker, p_service):
    """ Test what happens when mongodb disconnects during a job
    """

    worker.start()

    if p_service == "mongodb":
        service = worker.fixture_mongodb
    elif p_service == "redis":
        service = worker.fixture_redis

    service_pid = service.process.pid

    job_id1 = worker.send_task("tests.tasks.general.Add", {
        "a": 41,
        "b": 1,
        "sleep": 5
    },
                               block=False,
                               queue="default")

    time.sleep(2)

    service.stop()
    service.start()

    service_pid2 = service.process.pid

    # Make sure we did restart
    assert service_pid != service_pid2

    time.sleep(5)

    # Result should be there without issues
    assert Job(job_id1).fetch().data["result"] == 42
Example #22
def test_cancel_by_path(worker):

    # Start the worker with only one greenlet so that tasks execute
    # sequentially
    worker.start(flags="--greenlets 1")

    job_id1 = worker.send_task("tests.tasks.general.MongoInsert", {
        "a": 41,
        "sleep": 2
    },
                               block=False)
    worker.wait_for_idle()

    job_id2 = worker.send_task("tests.tasks.general.MongoInsert", {"a": 43},
                               block=False,
                               queue="testMrq")

    worker.send_task("mrq.basetasks.utils.JobAction", {
        "path": "tests.tasks.general.MongoInsert",
        "status": "queued",
        "action": "cancel"
    },
                     block=False)
    worker.wait_for_idle()

    # Leave some time to unqueue job_id2 without executing.
    time.sleep(1)
    worker.stop(deps=False)

    job1 = Job(job_id1).fetch().data
    job2 = Job(job_id2).fetch().data

    assert job1["status"] == "success"
    assert job1["result"] == {"a": 41, "sleep": 2}

    assert job2["status"] == "cancel"
    assert job2["dateexpires"] > job2["dateupdated"]

    assert job2.get("result") is None

    assert worker.mongodb_jobs.tests_inserts.count() == 1

    assert Queue("default").size() == 0

    worker.stop_deps()
Example #23
def test_custom_delimiters(worker, delimiter):

    queue = "main" + delimiter
    subqueue = queue + "subqueue"

    worker.start(queues=queue, flags="--subqueues_refresh_interval=0.1 --subqueues_delimiter=%s" % delimiter)
    job_id = worker.send_task("tests.tasks.general.GetTime", {}, queue=subqueue, block=False)
    Job(job_id).wait(poll_interval=0.01)
    worker.stop()
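
The same mechanism works with the default "/" delimiter, as the subqueue examples further down exercise. A minimal sketch assuming the same fixtures:

# A worker on the wildcard queue "main/" should pick up jobs sent to any
# "main/<subqueue>" once the subqueue list has been refreshed.
worker.start(queues="main/", flags="--subqueues_refresh_interval=0.1")
job_id = worker.send_task("tests.tasks.general.GetTime", {},
                          queue="main/priority", block=False)
Job(job_id).wait(poll_interval=0.01)
worker.stop()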
Example #24
def test_interrupt_maxconcurrency(worker):

    # The worker will raise a maxconcurrency on the second job
    worker.start(flags="--greenlets=2")

    job_ids = worker.send_tasks(
        "tests.tasks.concurrency.LockedAdd", [{"a": i, "b": 1, "sleep": 2} for i in range(2)], block=False
    )

    worker.wait_for_tasks_results(job_ids, accept_statuses=["success", "failed", "maxconcurrency"])
    job_statuses = [Job(job_id).fetch().data["status"] for job_id in job_ids]
    assert set(job_statuses) == set(["success", "maxconcurrency"])

    # the job concurrency key must be equal to 0
    last_job_id = worker.send_task("tests.tasks.concurrency.LockedAdd", {"a": 1, "b": 1, "sleep": 2}, block=False)

    last_job = Job(last_job_id).wait(poll_interval=0.01)
    assert last_job.get("status") == "success"
Example #25
def test_retry(worker):
    worker.start(flags="--config tests/fixtures/config-scheduler8.py")

    job_id = worker.send_task("tests.tasks.general.Retry", {
        "queue": "noexec",
        "delay": 60
    },
                              block=False)

    job_data = Job(job_id).wait(poll_interval=0.01, full_data=True)

    assert job_data["queue"] == "noexec"
    assert job_data["status"] == "retry"
    assert job_data["dateretry"] > datetime.datetime.utcnow()
    assert datetime.datetime.utcnow() + datetime.timedelta(
        days=1) < job_data["dateexpires"] < datetime.datetime.utcnow(
        ) + datetime.timedelta(days=3)
    assert job_data.get("result") is None
Example #26
File: cleaning.py Project: nfredrik/mrq
  def run(self, params):

    self.collection = connections.mongodb_jobs.mrq_jobs

    # If there are more than this many items on the queue, we don't try to check
    # if our mongodb jobs are still queued.
    max_queue_items = params.get("max_queue_items", 1000)

    stats = {
      "fetched": 0,
      "requeued": 0
    }

    for job_data in self.collection.find({
      "status": "queued"
    }, fields={"_id": 1, "queue": 1}).sort([("_id", 1)]):

      stats["fetched"] += 1

      queue = Queue(job_data["queue"])
      queue_size = queue.size()
      if queue_size > max_queue_items:
        log.info("Stopping because queue %s has %s items" % (queue, queue_size))
        break

      queue_jobs_ids = set(queue.list_job_ids(limit=max_queue_items + 1))
      if len(queue_jobs_ids) >= max_queue_items:
        log.info("Stopping because queue %s actually had more than %s items" % (queue, len(queue_jobs_ids)))
        break

      if str(job_data["_id"]) in queue_jobs_ids:
        log.info("Stopping because we found job %s in redis" % job_data["_id"])
        break

      # At this point, this job is not on the queue and we're sure the queue is less than max_queue_items
      # We can safely requeue the job.
      log.info("Requeueing %s on %s" % (job_data["_id"], queue.id))

      stats["requeued"] += 1
      job = Job(job_data["_id"])
      job.requeue(queue=job_data["queue"])

    return stats
Example #27
def test_retry_max_retries(worker):

    # Task has maxretries=1
    worker.start(flags="--config tests/fixtures/config-retry1.py")

    worker.send_task("tests.tasks.general.Retry", {},
                     block=True,
                     accept_statuses=["retry"])

    assert Queue("default").size() == 0

    job_id = worker.mongodb_jobs.mrq_jobs.find()[0]["_id"]

    job = Job(job_id).fetch()
    assert job.data["status"] == "retry"
    assert job.data["retry_count"] == 1

    time.sleep(2)

    # Should requeue
    worker.send_task("mrq.basetasks.cleaning.RequeueRetryJobs", {}, block=True)

    time.sleep(2)

    assert Queue("default").size() == 0

    job = Job(job_id).fetch()
    assert job.data["status"] == "maxretries"
    assert job.data["retry_count"] == 1

    # Then, manual requeue from the dashboard should reset the retry_count field.
    params = {
        "action": "requeue",
        "status": "maxretries",
        "destination_queue": "noexec"
    }

    worker.send_task("mrq.basetasks.utils.JobAction", params, block=True)

    job = Job(job_id).fetch()
    assert job.data["status"] == "queued"
    assert job.data["queue"] == "noexec"
    assert job.data["retry_count"] == 0
Example #28
def test_matchable_subqueues(worker, queues, enqueue_on):
    worker.start(queues=" ".join(queues), flags="--subqueues_refresh_interval=0.1")

    job_ids = []

    for subqueue in enqueue_on:
        job_id = worker.send_task("tests.tasks.general.GetTime", {}, queue=subqueue, block=False)
        job_ids.append(job_id)

    assert all([Job(j).wait(poll_interval=0.01, timeout=3) for j in job_ids])
    worker.stop()
Example #29
def test_pause_resume(worker):

    worker.start(flags="--paused_queues_refresh_interval=0.1")

    Queue("high").pause()

    assert Queue("high").is_paused()

    # wait for the paused_queues list to be refreshed
    time.sleep(2)

    job_id1 = send_task("tests.tasks.general.MongoInsert", {"a": 41},
                        queue="high")

    job_id2 = send_task("tests.tasks.general.MongoInsert", {"a": 43},
                        queue="low")

    time.sleep(5)

    job1 = Job(job_id1).fetch().data
    job2 = Job(job_id2).fetch().data

    assert job1["status"] == "queued"

    assert job2["status"] == "success"
    assert job2["result"] == {"a": 43}

    assert worker.mongodb_jobs.tests_inserts.count() == 1

    Queue("high").resume()

    Job(job_id1).wait(poll_interval=0.01)

    job1 = Job(job_id1).fetch().data

    assert job1["status"] == "success"
    assert job1["result"] == {"a": 41}

    assert worker.mongodb_jobs.tests_inserts.count() == 2

    worker.stop()
Example #30
def test_job_requeue(worker):
    from mrq.context import connections
    from mrq.job import Job

    worker.start()
    job_id = worker.send_task("tests.tasks.general.RaiseException", {},
                              block=False)
    worker.wait_for_idle()
    assert int(connections.redis.get("queuesize:%s" % "default")) == 0

    Job(job_id).requeue()
    assert int(connections.redis.get("queuesize:%s" % "default")) == 1
Example #31
    def run(self, params):

        additional_timeout = params.get("timeout", 300)

        stats = {
            "requeued": 0,
            "started": 0
        }

        # There shouldn't be that many "started" jobs, so we can quite safely
        # iterate over them.

        fields = {"_id": 1, "datestarted": 1, "queue": 1, "path": 1, "retry_count": 1}
        for job_data in connections.mongodb_jobs.mrq_jobs.find(
                {"status": "started"}, projection=fields):
            job = Job(job_data["_id"])
            job.set_data(job_data)

            stats["started"] += 1

            expire_date = datetime.datetime.utcnow(
            ) - datetime.timedelta(seconds=job.timeout + additional_timeout)

            if job_data["datestarted"] < expire_date:
                log.debug("Requeueing job %s" % job.id)
                job.requeue()
                stats["requeued"] += 1

        return stats
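
A worked example of the cutoff computed above, with illustrative values: given a per-task timeout of 3600 seconds and the default additional_timeout of 300, any job whose datestarted is more than 65 minutes old is treated as lost and requeued.

import datetime

job_timeout = 3600        # hypothetical per-task timeout (job.timeout)
additional_timeout = 300  # default grace period from params
expire_date = datetime.datetime.utcnow() - datetime.timedelta(
    seconds=job_timeout + additional_timeout)
# Jobs with datestarted < expire_date get requeued.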
Example #32
def test_matchable_subqueues(worker, queues, enqueue_on):
    worker.start(queues=" ".join(queues),
                 flags="--subqueues_refresh_interval=0.1")

    job_ids = []

    for subqueue in enqueue_on:
        job_id = worker.send_task("tests.tasks.general.GetTime", {},
                                  queue=subqueue,
                                  block=False)
        job_ids.append(job_id)

    for i, job_id in enumerate(job_ids):
        print("Checking queue %s" % enqueue_on[i])
        assert Job(job_id).wait(poll_interval=0.01, timeout=5)
Example #33
def test_retry_max_retries_zero(worker):

    # Task has maxretries=1
    worker.start(flags="--config tests/fixtures/config-retry1.py")

    worker.send_task("tests.tasks.general.Retry", {"max_retries": 0},
                     block=True,
                     accept_statuses=["maxretries"])

    assert Queue("default").size() == 0

    job_id = worker.mongodb_jobs.mrq_jobs.find()[0]["_id"]

    job = Job(job_id).fetch()
    assert job.data["status"] == "maxretries"
Example #34
def test_refresh_interval(worker):
    """ Tests that a refresh interval of 0 disables the subqueue detection """

    worker.start(queues="test/", flags="--subqueues_refresh_interval=0")

    time.sleep(2)

    job_id1 = worker.send_task("tests.tasks.general.GetTime", {"a": 41},
                               queue="test/subqueue",
                               block=False)

    time.sleep(5)

    job1 = Job(job_id1).fetch().data

    assert job1["status"] == "queued"
Example #35
def test_unmatchable_subqueues(worker, queue, enqueue_on):
    worker.start(queues=queue, flags="--subqueues_refresh_interval=0.1")

    job_ids = []

    for subqueue in enqueue_on:
        job_id = worker.send_task("tests.tasks.general.GetTime", {}, queue=subqueue, block=False)
        job_ids.append(job_id)

    time.sleep(2)
    results = [Job(j).fetch().data.get("status") for j in job_ids]

    # ensure tasks are not consumed by a worker
    assert results == ["queued"] * len(results)

    worker.stop()
Example #36
def test_pause_subqueue(worker):

    # set config in current context in order to have a subqueue delimiter
    set_current_config(get_config(config_type="worker"))

    worker.start(
        queues="high high/",
        flags="--subqueues_refresh_interval=1 --paused_queues_refresh_interval=1")

    Queue("high").pause()

    assert Queue("high/").is_paused()

    # wait for the paused_queues list to be refreshed
    time.sleep(2)

    job_id1 = send_task("tests.tasks.general.MongoInsert", {"a": 41},
                        queue="high")

    job_id2 = send_task("tests.tasks.general.MongoInsert", {"a": 43},
                        queue="high/subqueue")

    # wait a bit to make sure the jobs stay queued
    time.sleep(5)

    job1 = Job(job_id1).fetch().data
    job2 = Job(job_id2).fetch().data

    assert job1["status"] == "queued"
    assert job2["status"] == "queued"

    assert worker.mongodb_jobs.tests_inserts.count() == 0

    Queue("high/").resume()

    Job(job_id1).wait(poll_interval=0.01)

    Job(job_id2).wait(poll_interval=0.01)

    job1 = Job(job_id1).fetch().data
    job2 = Job(job_id2).fetch().data

    assert job1["status"] == "success"
    assert job1["result"] == {"a": 41}

    assert job2["status"] == "success"
    assert job2["result"] == {"a": 43}

    assert worker.mongodb_jobs.tests_inserts.count() == 2

    worker.stop()
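
For reference, a minimal setup sketch for the two helpers used at the top of this test; the import paths below match mrq's config and context modules, on the assumption that the test imports them the same way:

from mrq.config import get_config
from mrq.context import set_current_config

# Build a worker-style config and install it in the current context so
# that Queue() sees the subqueue delimiter.
set_current_config(get_config(config_type="worker"))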
Example #37
def test_general_requeue_order(worker):
    from mrq.job import Job

    jobids = worker.send_tasks("tests.tasks.general.Add", [
        {"a": 41, "b": 1, "sleep": 4},
        {"a": 42, "b": 1, "sleep": 1},
        {"a": 43, "b": 1, "sleep": 1}
    ], block=False)

    time.sleep(2)

    # We should be executing job1 now. Let's requeue job2, making it go to the end of the queue.
    Job(jobids[1]).requeue()

    worker.wait_for_idle()

    assert [x["result"] for x in worker.mongodb_jobs.mrq_jobs.find().sort(
        [["dateupdated", 1]])] == [42, 44, 43]
Example #38
def test_cli_run_nonblocking(worker):

    worker.start()

    job_id1 = worker.send_task_cli(
        "tests.tasks.general.Add", {"a": 41, "b": 1}, queue="default")

    job1 = Job(job_id1).fetch()

    job1.wait(poll_interval=0.01)

    job1.fetch()

    assert job1.data["status"] == "success"
    assert job1.data["result"] == 42
Example #39
def test_pause_refresh_interval(worker):
    """ Tests that a refresh interval of 0 disables the pause functionnality """

    worker.start(flags="--paused_queues_refresh_interval=0")

    Queue("high").pause()

    assert Queue("high").is_paused()

    # with a refresh interval of 0, the pause should never be picked up
    time.sleep(2)

    job_id1 = send_task("tests.tasks.general.MongoInsert", {"a": 41},
                        queue="high")

    time.sleep(5)

    job1 = Job(job_id1).fetch().data

    assert job1["status"] == "success"
    assert job1["result"] == {"a": 41}
Example #40
    def run(self, params):

        redis_key_started = Queue.redis_key_started()

        stats = {
            "fetched": 0,
            "requeued": 0
        }

        # Fetch all the jobs started more than a minute ago - they should not
        # be in redis:started anymore
        job_ids = connections.redis.zrangebyscore(
            redis_key_started, "-inf", time.time() - params.get("timeout", 60))

        # TODO this should be wrapped inside Queue or Worker
        # we shouldn't access these internals here
        queue_obj = Queue("default")
        unserialized_job_ids = queue_obj.unserialize_job_ids(job_ids)

        for i, job_id in enumerate(job_ids):

            queue = Job(unserialized_job_ids[i], start=False, fetch=False).fetch(
                full_data=True).data["queue"]

            queue_obj = Queue(queue)

            stats["fetched"] += 1

            log.info("Requeueing %s on %s" % (unserialized_job_ids[i], queue))

            # TODO LUA script & don't rpush if not in zset anymore.
            with connections.redis.pipeline(transaction=True) as pipeline:
                pipeline.zrem(redis_key_started, job_id)
                pipeline.rpush(queue_obj.redis_key, job_id)
                pipeline.execute()

            stats["requeued"] += 1

        return stats
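
The TODO above can be addressed with a short Lua script, making the zrem/rpush pair atomic so a job is only requeued if it is still in the started zset. A hedged sketch, assuming a redis-py client behind connections.redis and reusing the variables from the loop above:

# Only requeue if the job id is still present in the "started" zset.
REQUEUE_IF_STARTED = """
if redis.call('zrem', KEYS[1], ARGV[1]) == 1 then
    redis.call('rpush', KEYS[2], ARGV[1])
    return 1
end
return 0
"""

requeue_if_started = connections.redis.register_script(REQUEUE_IF_STARTED)
requeued = requeue_if_started(
    keys=[redis_key_started, queue_obj.redis_key], args=[job_id])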
Example #41
File: test_raw.py Project: AshBT/mrq
def test_raw_no_storage(worker):
    """ Test tasks that don't store unless they go to error status like 'failed' """

    worker.start(
        flags="--config tests/fixtures/config-raw1.py",
        queues="default testnostorage_raw"
    )

    jobs_collection = worker.mongodb_jobs.mrq_jobs
    test_collection = worker.mongodb_logs.tests_inserts

    worker.send_raw_tasks("testnostorage_raw", [
        "tests.tasks.general.MongoInsert 3"
    ], block=False)

    time.sleep(2)

    # No 'started' job document was inserted.
    assert jobs_collection.count() == 0

    time.sleep(2)

    # No 'success' job document either, but the task itself did insert.
    assert test_collection.count() == 1
    assert jobs_collection.count() == 0
    test_collection.remove({})

    # However failed tasks get stored.

    worker.send_raw_tasks("testnostorage_raw", [
        "tests.tasks.general.RaiseException 0"
    ], block=False)

    time.sleep(2)

    # Failed was inserted.
    assert jobs_collection.count({"status": "failed", "path": "tests.tasks.general.RaiseException"}) == 1

    # If we requeue it with a path that doesn't raise, it should be stored this time,
    # even on success: no_storage depends on the raw queue, not on the task path.
    _id = jobs_collection.find_one()["_id"]
    jobs_collection.update({"_id": _id}, {"$set": {"path": "tests.tasks.general.MongoInsert"}})
    job = Job(_id).fetch(full_data=True)
    job.requeue(queue="default")

    time.sleep(1)
    assert test_collection.count() == 1
    assert jobs_collection.count() == 1
    assert jobs_collection.count({"status": "success"}) == 1

    jobs_collection.remove({})

    # Test with retry: should be inserted
    worker.send_raw_tasks("testnostorage_raw", [
        "tests.tasks.general.Retry 0"
    ], block=False)

    assert jobs_collection.count({"status": "started"}) == 0

    time.sleep(2)

    assert jobs_collection.count({"status": "retry"}) == 1