Example #1
0
def main():
  """CLI entry point: run a single task, either queued or inline.

  Reads the task path and its arguments from the "run" config. With
  ``async`` set, the task is sent to a queue and its job id is printed;
  otherwise it is performed synchronously in this process and the JSON
  result is printed.
  """

  parser = argparse.ArgumentParser(description='Runs a task')

  cfg = config.get_config(parser=parser, config_type="run")
  cfg["is_cli"] = True
  set_current_config(cfg)
  log.info(cfg)

  # A single task argument is interpreted as a JSON object of params.
  if len(cfg["taskargs"]) == 1:
    params = json.loads(cfg["taskargs"][0])
  else:
    params = {}

    # mrq-run taskpath a 1 b 2 => {"a": "1", "b": "2"}
    for group in utils.group_iter(cfg["taskargs"], n=2):
      if len(group) != 2:
        # print() call syntax: valid on both Python 2 and Python 3
        # (the Python-2 print statement is a syntax error on Python 3).
        print("Number of arguments wasn't even")
        sys.exit(1)
      params[group[0]] = group[1]

  if cfg["async"]:
    # Queue the task and print its job id without waiting for the result.
    ret = queue.send_task(cfg["taskpath"], params, sync=False, queue=cfg["queue"])
    print(ret)
  else:
    # Perform the job inline on a detached job instance.
    worker_class = load_class_by_path(cfg["worker_class"])
    job = worker_class.job_class(None)
    job.data = {
      "path": cfg["taskpath"],
      "params": params,
      "queue": cfg["queue"]
    }
    job.datestarted = datetime.datetime.utcnow()
    set_current_job(job)
    ret = job.perform()
    print(json.dumps(ret))
Example #2
0
def main():
    """CLI entry point: enqueue a task, or perform it inline and print the result."""

    arg_parser = argparse.ArgumentParser(description='Runs a task')

    cfg = config.get_config(parser=arg_parser, config_type="run", sources=("file", "env", "args"))
    cfg["is_cli"] = True
    set_current_config(cfg)

    task_args = cfg["taskargs"]
    if len(task_args) == 1:
        # A single argument is a full JSON object of params.
        task_params = json.loads(task_args[0])  # pylint: disable=no-member
    else:
        # mrq-run taskpath a 1 b 2 => {"a": "1", "b": "2"}
        task_params = {}
        for pair in utils.group_iter(task_args, n=2):
            if len(pair) != 2:
                print("Number of arguments wasn't even")
                sys.exit(1)
            key, value = pair
            task_params[key] = value

    if cfg["queue"]:
        # Enqueue and report the resulting job id.
        ret = queue_job(cfg["taskpath"], task_params, queue=cfg["queue"])
        print(ret)
    else:
        # Perform inline on a detached job instance.
        job_cls = load_class_by_path(cfg["worker_class"]).job_class
        current_job = job_cls(None)
        current_job.set_data({
            "path": cfg["taskpath"],
            "params": task_params,
            "queue": cfg["queue"]
        })
        current_job.datestarted = datetime.datetime.utcnow()
        set_current_job(current_job)
        ret = current_job.perform()
        print(json_stdlib.dumps(ret, cls=MongoJSONEncoder))  # pylint: disable=no-member
Example #3
0
    def perform_action(self, action, query, destination_queue):
        """Cancel or requeue every job matching a MongoDB query.

        :param action: "cancel", "requeue" or "requeue_retry".
        :param query: MongoDB filter selecting the jobs to act on.
        :param destination_queue: optional queue name overriding each job's
            own queue when requeuing; ``None`` keeps the original queue.
        :return: dict with "requeued" and "cancelled" counters.
        """

        stats = {"requeued": 0, "cancelled": 0}

        if action == "cancel":

            default_job_timeout = get_current_config()["default_job_timeout"]

            # Finding the ttl here to expire is a bit hard because we may have mixed paths
            # and hence mixed ttls.
            # If we are cancelling by path, get this ttl
            if query.get("path"):
                result_ttl = get_task_cfg(query["path"]).get(
                    "result_ttl", default_job_timeout)

            # If not, get the maximum ttl of all tasks.
            else:

                tasks_defs = get_current_config().get("tasks", {})
                tasks_ttls = [
                    cfg.get("result_ttl", 0) for cfg in tasks_defs.values()
                ]

                result_ttl = max([default_job_timeout] + tasks_ttls)

            now = datetime.datetime.utcnow()
            ret = self.collection.update(query, {
                "$set": {
                    "status": "cancel",
                    "dateexpires":
                    now + datetime.timedelta(seconds=result_ttl),
                    "dateupdated": now
                }
            },
                                         multi=True)
            stats["cancelled"] = ret["n"]

            # Special case when emptying just by queue name: empty it directly!
            # In this case we could also lose some jobs that were queued after
            # the MongoDB update. They will be "lost" and requeued later like the other case
            # after the Redis BLPOP
            # list() is needed on Python 3, where keys() returns a view that
            # never compares equal to a list.
            if list(query.keys()) == ["queue"]:
                Queue(query["queue"]).empty()

        elif action in ("requeue", "requeue_retry"):

            # Requeue task by groups of maximum 1k items (if all in the same
            # queue)
            cursor = self.collection.find(query, projection=["_id", "queue"])

            # We must freeze the list because queries below would change it.
            # This could not fit in memory, research adding {"stats": {"$ne":
            # "queued"}} in the query
            fetched_jobs = list(cursor)

            for jobs in group_iter(fetched_jobs, n=1000):

                jobs_by_queue = defaultdict(list)
                for job in jobs:
                    jobs_by_queue[job["queue"]].append(job["_id"])
                    stats["requeued"] += 1

                for queue in jobs_by_queue:

                    updates = {
                        "status": "queued",
                        "dateupdated": datetime.datetime.utcnow()
                    }

                    if destination_queue is not None:
                        updates["queue"] = destination_queue

                    # "requeue" resets the retry counter, "requeue_retry" keeps it.
                    if action == "requeue":
                        updates["retry_count"] = 0

                    self.collection.update(
                        {"_id": {
                            "$in": jobs_by_queue[queue]
                        }}, {"$set": updates},
                        multi=True)

                    # Between these two lines, jobs can become "lost" too.

                    Queue(destination_queue or queue).enqueue_job_ids(
                        [str(x) for x in jobs_by_queue[queue]])

        # print() call syntax so this also runs on Python 3.
        print(stats)

        return stats
Example #4
0
    def perform_action(self, action, query, destination_queue):
        """Cancel or requeue every job matching a MongoDB query.

        :param action: "cancel", "requeue" or "requeue_retry".
        :param query: MongoDB filter selecting the jobs to act on.
        :param destination_queue: optional queue name overriding each job's
            own queue when requeuing; ``None`` keeps the original queue.
        :return: dict with "requeued" and "cancelled" counters.
        """

        stats = {
            "requeued": 0,
            "cancelled": 0
        }

        if action == "cancel":

            default_job_timeout = get_current_config()["default_job_timeout"]

            # Finding the ttl here to expire is a bit hard because we may have mixed paths
            # and hence mixed ttls.
            # If we are cancelling by path, get this ttl
            if query.get("path"):
                result_ttl = get_task_cfg(query["path"]).get("result_ttl", default_job_timeout)

            # If not, get the maximum ttl of all tasks.
            else:

                tasks_defs = get_current_config().get("tasks", {})
                tasks_ttls = [cfg.get("result_ttl", 0) for cfg in tasks_defs.values()]

                result_ttl = max([default_job_timeout] + tasks_ttls)

            now = datetime.datetime.utcnow()
            ret = self.collection.update(query, {"$set": {
                "status": "cancel",
                "dateexpires": now + datetime.timedelta(seconds=result_ttl),
                "dateupdated": now
            }}, multi=True)
            stats["cancelled"] = ret["n"]

            # Special case when emptying just by queue name: empty it directly!
            # In this case we could also lose some jobs that were queued after
            # the MongoDB update. They will be "lost" and requeued later like the other case
            # after the Redis BLPOP
            # list() is needed on Python 3, where keys() returns a view that
            # never compares equal to a list.
            if list(query.keys()) == ["queue"]:
                Queue(query["queue"]).empty()

        elif action in ("requeue", "requeue_retry"):

            # Requeue task by groups of maximum 1k items (if all in the same
            # queue)
            cursor = self.collection.find(query, projection=["_id", "queue"])

            # We must freeze the list because queries below would change it.
            # This could not fit in memory, research adding {"stats": {"$ne":
            # "queued"}} in the query
            fetched_jobs = list(cursor)

            for jobs in group_iter(fetched_jobs, n=1000):

                jobs_by_queue = defaultdict(list)
                for job in jobs:
                    jobs_by_queue[job["queue"]].append(job["_id"])
                    stats["requeued"] += 1

                for queue in jobs_by_queue:

                    updates = {
                        "status": "queued",
                        "dateupdated": datetime.datetime.utcnow()
                    }

                    if destination_queue is not None:
                        updates["queue"] = destination_queue

                    # "requeue" resets the retry counter, "requeue_retry" keeps it.
                    if action == "requeue":
                        updates["retry_count"] = 0

                    self.collection.update({
                        "_id": {"$in": jobs_by_queue[queue]}
                    }, {"$set": updates}, multi=True)

                    # Between these two lines, jobs can become "lost" too.

                    Queue(destination_queue or queue, add_to_known_queues=True).enqueue_job_ids(
                        [str(x) for x in jobs_by_queue[queue]])

        # print() call syntax so this also runs on Python 3.
        print(stats)

        return stats
Example #5
0
    def perform_action(self, action, query, destination_queue):
        """Cancel or requeue every job matching a MongoDB query, keeping the
        cached per-queue sizes in sync via ``set_queues_size``.

        :param action: "cancel", "requeue" or "requeue_retry".
        :param query: MongoDB filter selecting the jobs to act on. NOTE: it is
            mutated in the requeue case (a default "status" filter is added).
        :param destination_queue: optional queue name overriding each job's
            own queue when requeuing; ``None`` keeps the original queue.
        :return: dict with "requeued" and "cancelled" counters.
        """

        stats = {"requeued": 0, "cancelled": 0}

        if action == "cancel":

            default_job_timeout = get_current_config()["default_job_timeout"]

            # Finding the ttl here to expire is a bit hard because we may have mixed paths
            # and hence mixed ttls.
            # If we are cancelling by path, get this ttl
            if query.get("path"):
                result_ttl = get_task_cfg(query["path"]).get(
                    "result_ttl", default_job_timeout)

            # If not, get the maximum ttl of all tasks.
            else:

                tasks_defs = get_current_config().get("tasks", {})
                tasks_ttls = [
                    cfg.get("result_ttl", 0) for cfg in itervalues(tasks_defs)
                ]

                result_ttl = max([default_job_timeout] + tasks_ttls)

            now = datetime.datetime.utcnow()

            # Count the matching jobs per queue before cancelling so the
            # cached queue sizes can be decremented afterwards.
            size_by_queues = defaultdict(int)
            if "queue" not in query:
                for job in self.collection.find(query,
                                                projection={"queue": 1}):
                    size_by_queues[job["queue"]] += 1

            ret = self.collection.update(query, {
                "$set": {
                    "status": "cancel",
                    "dateexpires":
                    now + datetime.timedelta(seconds=result_ttl),
                    "dateupdated": now
                }
            },
                                         multi=True)
            stats["cancelled"] = ret["n"]

            if "queue" in query:
                if isinstance(query["queue"], str):
                    size_by_queues[query["queue"]] = ret["n"]
            set_queues_size(size_by_queues, action="decr")

            # Special case when emptying just by queue name: empty it directly!
            # In this case we could also lose some jobs that were queued after
            # the MongoDB update. They will be "lost" and requeued later like the other case
            # after the Redis BLPOP
            # Use str here, consistent with the isinstance check above:
            # basestring is Python-2-only and raises NameError on Python 3.
            if list(query.keys()) == ["queue"] and isinstance(
                    query["queue"], str):
                Queue(query["queue"]).empty()

        elif action in ("requeue", "requeue_retry"):

            # Requeue task by groups of maximum 1k items (if all in the same
            # queue)
            # Skip already-queued jobs unless the caller filtered on status.
            status_query = query.get("status")
            if not status_query:
                query["status"] = {"$ne": "queued"}

            cursor = self.collection.find(query, projection=["_id", "queue"])

            for jobs in group_iter(cursor, n=1000):

                jobs_by_queue = defaultdict(list)
                for job in jobs:
                    jobs_by_queue[job["queue"]].append(job["_id"])
                    stats["requeued"] += 1

                for queue in jobs_by_queue:
                    updates = {
                        "status": "queued",
                        "datequeued": datetime.datetime.utcnow(),
                        "dateupdated": datetime.datetime.utcnow()
                    }

                    if destination_queue is not None:
                        updates["queue"] = destination_queue

                    # "requeue" resets the retry counter, "requeue_retry" keeps it.
                    if action == "requeue":
                        updates["retry_count"] = 0

                    self.collection.update(
                        {"_id": {
                            "$in": jobs_by_queue[queue]
                        }}, {"$set": updates},
                        multi=True)

                # Grow the cached queue sizes by the number of requeued jobs.
                set_queues_size({
                    queue: len(job_ids)
                    for queue, job_ids in jobs_by_queue.items()
                })

        return stats