Ejemplo n.º 1
0
def check_auth(username, pwd):
    """This function is called to check if a username /
    password combination is valid.

    The "dashboard_httpauth" config value is expected to be a
    "user:password" string.
    """
    cfg = get_current_config()
    # Split once instead of twice as before; indexes [0]/[1] keep the
    # original behavior for values containing extra ":" characters.
    parts = cfg["dashboard_httpauth"].split(":")
    return username == parts[0] and pwd == parts[1]
Ejemplo n.º 2
0
Archivo: context.py Proyecto: AshBT/mrq
 def run(self, params):
     """Return identifying info about the current job/worker context."""
     log.info("Getting context info...")
     job = get_current_job()
     worker = get_current_worker()
     return {
         "job_id": job.id,
         "worker_id": worker.id,
         "config": get_current_config()
     }
Ejemplo n.º 3
0
 def run(self, params):
     """Collect the ids of the current job and worker plus the config."""
     log.info("Getting context info...")
     info = {}
     info["job_id"] = get_current_job().id
     info["worker_id"] = get_current_worker().id
     info["config"] = get_current_config()
     return info
Ejemplo n.º 4
0
Archivo: app.py Proyecto: benjisg/mrq
def api_job_action():
    """Queue a JobAction utility task built from the POSTed form fields.

    Returns a JSON payload with the id of the queued job.
    """
    # dict.iteritems() does not exist on Python 3; .items() works on both.
    params = {k: v for k, v in request.form.items()}
    # A "status" value like "failed-retry" means a list of statuses.
    if params.get("status") and "-" in params.get("status"):
        params["status"] = params.get("status").split("-")
    return jsonify({"job_id": queue_job("mrq.basetasks.utils.JobAction",
                                        params,
                                        queue=get_current_config()["dashboard_queue"])})
Ejemplo n.º 5
0
Archivo: utils.py Proyecto: AshBT/mrq
def check_auth(username, pwd):
    """This function is called to check if a username /
    password combination is valid.

    Credentials come from the "dashboard_httpauth" config value,
    a "user:password" string.
    """
    cfg = get_current_config()
    # Avoid splitting the same config string twice.
    parts = cfg["dashboard_httpauth"].split(":")
    return username == parts[0] and pwd == parts[1]
Ejemplo n.º 6
0
def test_context_metric_queue(worker):
    """Check that queue metrics are reported both globally and via the
    local metric hook when a task enqueues another task."""
    from mrq.context import get_current_config

    local_config = get_current_config()
    local_config["metric_hook"] = METRIC_HOOK
    _reset_local_metrics()

    worker.start(flags=" --config tests/fixtures/config-metric.py")

    # Will send 1 task inside!
    worker.send_task("tests.tasks.general.SendTask", {
                     "path": "tests.tasks.general.Add", "params": {"a": 41, "b": 1}})

    metrics = json.loads(
        worker.send_task("tests.tasks.general.GetMetrics", {}))

    # GetMetrics is also a task!
    assert metrics.get("queues.default.dequeued") == 3
    assert metrics.get("queues.all.dequeued") == 3
    assert metrics.get("jobs.status.started") == 3
    assert metrics.get("jobs.status.success") == 2  # At the time it is run, GetMetrics isn't success yet.

    # BUG FIX: these two comparisons were bare expressions (no-ops);
    # they must be asserted to actually check the local hook counters.
    assert TEST_LOCAL_METRICS.get("queues.default.enqueued") == 2
    assert TEST_LOCAL_METRICS.get("queues.all.enqueued") == 2

    local_config["metric_hook"] = None
Ejemplo n.º 7
0
def test_context_metric_success(worker):
    """Check that job success metrics are reported when tasks run."""
    from mrq.context import get_current_config

    local_config = get_current_config()
    local_config["metric_hook"] = METRIC_HOOK
    _reset_local_metrics()

    worker.start(flags=" --config tests/fixtures/config-metric.py")

    result = worker.send_task("tests.tasks.general.Add", {"a": 41, "b": 1})
    result = worker.send_task("tests.tasks.general.Add", {"a": 41, "b": 1})

    assert result == 42

    metrics = json.loads(
        worker.send_task("tests.tasks.general.GetMetrics", {}))

    # GetMetrics is also a task!
    assert metrics.get("queues.default.dequeued") == 3
    assert metrics.get("queues.all.dequeued") == 3

    # BUG FIX: this comparison was a bare expression (no-op); it must
    # be asserted to actually check the local hook counter.
    assert TEST_LOCAL_METRICS.get("jobs.status.queued") == 3
    assert metrics.get("jobs.status.started") == 3
    assert metrics.get("jobs.status.success") == 2  # At the time it is run, GetMetrics isn't success yet.

    local_config["metric_hook"] = None
Ejemplo n.º 8
0
def api_job_action():
    """Queue a JobAction task from the POSTed form and return its job id."""
    params = {key: value for key, value in iteritems(request.form)}
    status = params.get("status")
    # "a-b" style status values are treated as a list of statuses.
    if status and "-" in status:
        params["status"] = status.split("-")
    job_id = queue_job("mrq.basetasks.utils.JobAction",
                       params,
                       queue=get_current_config()["dashboard_queue"])
    return jsonify({"job_id": job_id})
Ejemplo n.º 9
0
    def run(self, params):
        """Entry point: orchestrate every known worker group in a subpool."""
        self.config = get_current_config()

        concurrency = int(params.get("concurrency", 5))
        groups = self.fetch_worker_group_definitions()

        # Nothing to do without at least one worker group definition.
        if not groups:
            log.error("No worker group definition yet. Can't orchestrate!")
            return

        subpool_map(concurrency, self.orchestrate, groups)
Ejemplo n.º 10
0
Archivo: utils.py Proyecto: AshBT/mrq
def requires_auth(f):
    """Wrap a view with HTTP basic auth unless auth is disabled in config."""
    cfg = get_current_config()

    # No credentials configured: serve the view unprotected.
    if not cfg["dashboard_httpauth"]:
        return f

    @wraps(f)
    def decorated(*args, **kwargs):
        auth = request.authorization
        if auth and check_auth(auth.username, auth.password):
            return f(*args, **kwargs)
        return authenticate()

    return decorated
Ejemplo n.º 11
0
def requires_auth(f):
    """Guard *f* behind HTTP basic auth when "dashboard_httpauth" is set."""
    config = get_current_config()
    if not config["dashboard_httpauth"]:
        # Auth disabled: return the view untouched.
        return f

    @wraps(f)
    def decorated(*args, **kwargs):
        credentials = request.authorization
        valid = bool(credentials) and check_auth(credentials.username,
                                                 credentials.password)
        if not valid:
            return authenticate()
        return f(*args, **kwargs)

    return decorated
Ejemplo n.º 12
0
    def run(self, params):
        """Serialize the recorded I/O events to JSON, stripping fields the
        encoder cannot handle."""
        events = get_current_config().get("io_events")

        serializable = []
        for event in events:
            # Drop non-serializable references.
            event.pop("client", None)
            event.pop("result", None)
            job = event.get("job")
            if job:
                # Keep only the job id, not the whole job object.
                event["job"] = job.id
            if event["hook"].startswith("redis_"):
                args = event["args"]
                event["key"] = args[0] if len(args) else None
                event["args"] = repr(args)

            serializable.append(event)

        return json.dumps(serializable, cls=MongoJSONEncoder)
Ejemplo n.º 13
0
    def run(self, params):
        """Return recorded I/O events as JSON after removing the pieces
        that cannot be serialized."""
        raw_events = get_current_config().get("io_events")

        cleaned = []
        for evt in raw_events:
            # Strip non-serializable references before encoding.
            evt.pop("client", None)
            evt.pop("result", None)
            if evt.get("job"):
                # Keep only the job id.
                evt["job"] = evt["job"].id
            if evt["hook"].startswith("redis_"):
                redis_args = evt["args"]
                evt["key"] = redis_args[0] if len(redis_args) else None
                evt["args"] = repr(redis_args)
                evt.pop("options", None)
            cleaned.append(evt)

        return json.dumps(cleaned, cls=MongoJSONEncoder)
Ejemplo n.º 14
0
def api_job_traceback(job_id):
    """Return a job's traceback (or traceback history) as JSON.

    Depending on the "save_traceback_history" config flag, fetch either
    the "traceback_history" or the "traceback" field of the job document,
    falling back to "traceback" when no history exists.
    """
    # NOTE(review): "mrq_jobss" has a doubled "s" — looks like a typo for
    # "mrq_jobs"; confirm the actual collection name before changing it.
    collection = connections.mongodb_jobs.mrq_jobss
    if get_current_config().get("save_traceback_history"):

        field_sent = "traceback_history"
    else:
        field_sent = "traceback"

    # Only project the single field we intend to return.
    job_data = collection.find_one({"_id": ObjectId(job_id)},
                                   projection=[field_sent])

    if not job_data:
        # If a job has no traceback history, we fallback onto traceback
        if field_sent == "traceback_history":
            field_sent = "traceback"
            job_data = collection.find_one({"_id": ObjectId(job_id)},
                                           projection=[field_sent])
        if not job_data:
            job_data = {}

    return jsonify(
        {field_sent: job_data.get(field_sent, "No exception raised")})
Ejemplo n.º 15
0
def api_job_traceback(job_id):
    """Serve a job's traceback (or full traceback history) as JSON."""
    collection = connections.mongodb_jobs.mrq_jobss

    if get_current_config().get("save_traceback_history"):
        field_sent = "traceback_history"
    else:
        field_sent = "traceback"

    query = {"_id": ObjectId(job_id)}
    job_data = collection.find_one(query, projection=[field_sent])

    # Fall back to the plain traceback when no history was saved.
    if not job_data and field_sent == "traceback_history":
        field_sent = "traceback"
        job_data = collection.find_one(query, projection=[field_sent])

    if not job_data:
        job_data = {}

    return jsonify({
        field_sent: job_data.get(field_sent, "No exception raised")
    })
Ejemplo n.º 16
0
# BUG FIX: "print x" is a SyntaxError on Python 3; use the print function,
# matching the modernized version of this example elsewhere in the file.
from __future__ import print_function
from mrq.context import setup_context, run_task, get_current_config

# Autoconfigure MRQ's environment
setup_context()

# Run a task synchronously in the current process.
print(run_task("tests.tasks.general.Add", {"a": 41, "b": 1}))

# The config is available once setup_context() has run.
print(get_current_config()["name"])
Ejemplo n.º 17
0
    def perform_action(self, action, query, destination_queue):
        """Apply *action* ("cancel", "requeue" or "requeue_retry") to every
        job matching the MongoDB *query*.

        Args:
            action: "cancel" marks jobs cancelled and sets their expiry;
                "requeue"/"requeue_retry" put jobs back in "queued" status
                ("requeue" also resets retry_count to 0).
            query: MongoDB filter selecting the jobs to act on.
            destination_queue: if not None, requeued jobs are moved there.

        Returns:
            dict with "requeued" and "cancelled" counters.
        """
        stats = {"requeued": 0, "cancelled": 0}

        if action == "cancel":

            default_job_timeout = get_current_config()["default_job_timeout"]

            # Finding the ttl here to expire is a bit hard because we may have mixed paths
            # and hence mixed ttls.
            # If we are cancelling by path, get this ttl
            if query.get("path"):
                result_ttl = get_task_cfg(query["path"]).get(
                    "result_ttl", default_job_timeout)

            # If not, get the maximum ttl of all tasks.
            else:

                tasks_defs = get_current_config().get("tasks", {})
                tasks_ttls = [
                    cfg.get("result_ttl", 0) for cfg in itervalues(tasks_defs)
                ]

                result_ttl = max([default_job_timeout] + tasks_ttls)

            now = datetime.datetime.utcnow()

            # When cancelling across queues, count how many jobs each queue
            # loses so the queue sizes can be decremented afterwards.
            size_by_queues = defaultdict(int)
            if "queue" not in query:
                for job in self.collection.find(query, projection={"queue":
                                                                   1}):
                    size_by_queues[job["queue"]] += 1

            ret = self.collection.update(query, {
                "$set": {
                    "status": "cancel",
                    "dateexpires":
                    now + datetime.timedelta(seconds=result_ttl),
                    "dateupdated": now
                }
            },
                                         multi=True)
            stats["cancelled"] = ret["n"]

            if "queue" in query:
                if isinstance(query["queue"], str):
                    size_by_queues[query["queue"]] = ret["n"]
            set_queues_size(size_by_queues, action="decr")

            # Special case when emptying just by queue name: empty it directly!
            # In this case we could also lose some jobs that were queued after
            # the MongoDB update. They will be "lost" and requeued later like the other case
            # after the Redis BLPOP
            # NOTE(review): "basestring" is Python-2-only; presumably provided
            # by a compat import at the top of the file — confirm.
            if list(query.keys()) == ["queue"] and isinstance(
                    query["queue"], basestring):
                Queue(query["queue"]).empty()

        elif action in ("requeue", "requeue_retry"):

            # Requeue task by groups of maximum 1k items (if all in the same
            # queue)
            status_query = query.get("status")
            if not status_query:
                # Don't requeue jobs that are already queued.
                query["status"] = {"$ne": "queued"}

            cursor = self.collection.find(query, projection=["_id", "queue"])

            for jobs in group_iter(cursor, n=1000):

                # Group job ids by their queue so we can update per queue.
                jobs_by_queue = defaultdict(list)
                for job in jobs:
                    jobs_by_queue[job["queue"]].append(job["_id"])
                    stats["requeued"] += 1

                for queue in jobs_by_queue:
                    updates = {
                        "status": "queued",
                        "datequeued": datetime.datetime.utcnow(),
                        "dateupdated": datetime.datetime.utcnow()
                    }

                    if destination_queue is not None:
                        updates["queue"] = destination_queue

                    # Plain "requeue" restarts the retry counter from 0.
                    if action == "requeue":
                        updates["retry_count"] = 0

                    self.collection.update(
                        {"_id": {
                            "$in": jobs_by_queue[queue]
                        }}, {"$set": updates},
                        multi=True)

                set_queues_size({
                    queue: len(jobs)
                    for queue, jobs in jobs_by_queue.items()
                })

        return stats
Ejemplo n.º 18
0
Archivo: utils.py Proyecto: ggueret/mrq
def get_task_cfg(taskpath):
    """Return the per-task config dict for *taskpath* (empty dict if none)."""
    tasks = get_current_config().get("tasks", {})
    return tasks.get(taskpath) or {}
Ejemplo n.º 19
0
 def run(self, params):
     """Dump the "test_global_metrics" config value as JSON."""
     metrics = get_current_config().get("test_global_metrics")
     return json.dumps(metrics)
Ejemplo n.º 20
0
 def run(self, params):
     """Return the global test metrics from the config, JSON-encoded."""
     cfg = get_current_config()
     return json.dumps(cfg.get("test_global_metrics"))
Ejemplo n.º 21
0
def metric(name, incr=1, **kwargs):
    """Forward a metric event to the configured "metric_hook", if any.

    Args:
        name: metric name.
        incr: increment value (defaults to 1).
        **kwargs: passed through to the hook.

    Returns:
        Whatever the hook returns, or None when no hook is configured.
    """
    # Look the hook up once instead of twice as before.
    hook = get_current_config().get("metric_hook")
    if hook:
        return hook(name, incr=incr, **kwargs)
Ejemplo n.º 22
0
Archivo: app.py Proyecto: bossjones/mrq
def api_job_action():
  """Queue a JobAction task with all POSTed form fields as its params."""
  # .items() replaces the Python-2-only .iteritems() and works on both 2 and 3.
  return jsonify({
    "job_id": send_task("mrq.basetasks.utils.JobAction", {k: v for k, v in request.form.items()}, queue=get_current_config()["dashboard_queue"])
  })
Ejemplo n.º 23
0
    def perform_action(self, action, query, destination_queue):
        """Apply *action* ("cancel", "requeue" or "requeue_retry") to every
        job matching the MongoDB *query*.

        Args:
            action: "cancel" expires matching jobs; "requeue"/"requeue_retry"
                put them back on their queue ("requeue" resets retry_count).
            query: MongoDB filter selecting the jobs to act on.
            destination_queue: if not None, requeued jobs are moved there.

        Returns:
            dict with "requeued" and "cancelled" counters.

        NOTE(review): this variant is Python-2-only (`print stats` statement,
        and `query.keys() == ["queue"]` relies on keys() returning a list).
        """
        stats = {"requeued": 0, "cancelled": 0}

        if action == "cancel":

            default_job_timeout = get_current_config()["default_job_timeout"]

            # Finding the ttl here to expire is a bit hard because we may have mixed paths
            # and hence mixed ttls.
            # If we are cancelling by path, get this ttl
            if query.get("path"):
                result_ttl = get_task_cfg(query["path"]).get(
                    "result_ttl", default_job_timeout)

            # If not, get the maximum ttl of all tasks.
            else:

                tasks_defs = get_current_config().get("tasks", {})
                tasks_ttls = [
                    cfg.get("result_ttl", 0) for cfg in tasks_defs.values()
                ]

                result_ttl = max([default_job_timeout] + tasks_ttls)

            now = datetime.datetime.utcnow()
            ret = self.collection.update(query, {
                "$set": {
                    "status": "cancel",
                    "dateexpires":
                    now + datetime.timedelta(seconds=result_ttl),
                    "dateupdated": now
                }
            },
                                         multi=True)
            stats["cancelled"] = ret["n"]

            # Special case when emptying just by queue name: empty it directly!
            # In this case we could also lose some jobs that were queued after
            # the MongoDB update. They will be "lost" and requeued later like the other case
            # after the Redis BLPOP
            if query.keys() == ["queue"]:
                Queue(query["queue"]).empty()

        elif action in ("requeue", "requeue_retry"):

            # Requeue task by groups of maximum 1k items (if all in the same
            # queue)
            cursor = self.collection.find(query, projection=["_id", "queue"])

            # We must freeze the list because queries below would change it.
            # This could not fit in memory, research adding {"stats": {"$ne":
            # "queued"}} in the query
            fetched_jobs = list(cursor)

            for jobs in group_iter(fetched_jobs, n=1000):

                # Group job ids by their queue so we can update per queue.
                jobs_by_queue = defaultdict(list)
                for job in jobs:
                    jobs_by_queue[job["queue"]].append(job["_id"])
                    stats["requeued"] += 1

                for queue in jobs_by_queue:

                    updates = {
                        "status": "queued",
                        "dateupdated": datetime.datetime.utcnow()
                    }

                    if destination_queue is not None:
                        updates["queue"] = destination_queue

                    # Plain "requeue" restarts the retry counter from 0.
                    if action == "requeue":
                        updates["retry_count"] = 0

                    self.collection.update(
                        {"_id": {
                            "$in": jobs_by_queue[queue]
                        }}, {"$set": updates},
                        multi=True)

                    # Between these two lines, jobs can become "lost" too.

                    Queue(destination_queue or queue).enqueue_job_ids(
                        [str(x) for x in jobs_by_queue[queue]])

        print stats

        return stats
Ejemplo n.º 24
0
def get_task_cfg(taskpath):
    """Fetch the config dict declared for *taskpath*, or {} when absent."""
    cfg = get_current_config()
    return cfg.get("tasks", {}).get(taskpath) or {}
Ejemplo n.º 25
0
# Flask application setup for the imgfab project.
app = Flask('imgfab')

# Load settings.py (sibling of this file) into the Flask config.
AppConfig(app, os.path.abspath(os.path.join(__file__, '../settings.py')))
Bootstrap(app)

app.config.update({"DEBUG": DEBUG})

# MongoDB + python-social-auth wiring.
db = mongoengine.MongoEngine(app)
app.register_blueprint(social_auth)
init_social(app, db)
app.context_processor(backends)

login_manager = login.LoginManager()
login_manager.init_app(app)

# Make sure an MRQ context exists so tasks can be queued from the web app.
if not get_current_config():
    setup_context()


@app.route("/data/facebook/albums")
@login.login_required
def data_facebook_albums():
    """Return the logged-in user's Facebook albums as JSON."""
    albums = g.user.get_facebook_albums()
    return json.dumps(albums)


@app.route("/create_job", methods=["POST"])
# @login.login_required
def create_job():
    taskpath = request.form['path']
    taskparams = json.loads(request.form['params'])
Ejemplo n.º 26
0
Archivo: utils.py Proyecto: ggueret/mrq
    def perform_action(self, action, query, destination_queue):
        """Apply *action* ("cancel", "requeue" or "requeue_retry") to every
        job matching the MongoDB *query*.

        Args:
            action: "cancel" expires matching jobs; "requeue"/"requeue_retry"
                put them back on their queue ("requeue" resets retry_count).
            query: MongoDB filter selecting the jobs to act on.
            destination_queue: if not None, requeued jobs are moved there.

        Returns:
            dict with "requeued" and "cancelled" counters.

        NOTE(review): this variant is Python-2-only (`print stats` statement,
        and `query.keys() == ["queue"]` relies on keys() returning a list).
        """
        stats = {
            "requeued": 0,
            "cancelled": 0
        }

        if action == "cancel":

            default_job_timeout = get_current_config()["default_job_timeout"]

            # Finding the ttl here to expire is a bit hard because we may have mixed paths
            # and hence mixed ttls.
            # If we are cancelling by path, get this ttl
            if query.get("path"):
                result_ttl = get_task_cfg(query["path"]).get("result_ttl", default_job_timeout)

            # If not, get the maximum ttl of all tasks.
            else:

                tasks_defs = get_current_config().get("tasks", {})
                tasks_ttls = [cfg.get("result_ttl", 0) for cfg in tasks_defs.values()]

                result_ttl = max([default_job_timeout] + tasks_ttls)

            now = datetime.datetime.utcnow()
            ret = self.collection.update(query, {"$set": {
                "status": "cancel",
                "dateexpires": now + datetime.timedelta(seconds=result_ttl),
                "dateupdated": now
            }}, multi=True)
            stats["cancelled"] = ret["n"]

            # Special case when emptying just by queue name: empty it directly!
            # In this case we could also lose some jobs that were queued after
            # the MongoDB update. They will be "lost" and requeued later like the other case
            # after the Redis BLPOP
            if query.keys() == ["queue"]:
                Queue(query["queue"]).empty()

        elif action in ("requeue", "requeue_retry"):

            # Requeue task by groups of maximum 1k items (if all in the same
            # queue)
            cursor = self.collection.find(query, projection=["_id", "queue"])

            # We must freeze the list because queries below would change it.
            # This could not fit in memory, research adding {"stats": {"$ne":
            # "queued"}} in the query
            fetched_jobs = list(cursor)

            for jobs in group_iter(fetched_jobs, n=1000):

                # Group job ids by their queue so we can update per queue.
                jobs_by_queue = defaultdict(list)
                for job in jobs:
                    jobs_by_queue[job["queue"]].append(job["_id"])
                    stats["requeued"] += 1

                for queue in jobs_by_queue:

                    updates = {
                        "status": "queued",
                        "dateupdated": datetime.datetime.utcnow()
                    }

                    if destination_queue is not None:
                        updates["queue"] = destination_queue

                    # Plain "requeue" restarts the retry counter from 0.
                    if action == "requeue":
                        updates["retry_count"] = 0

                    self.collection.update({
                        "_id": {"$in": jobs_by_queue[queue]}
                    }, {"$set": updates}, multi=True)

                    # Between these two lines, jobs can become "lost" too.

                    Queue(destination_queue or queue, add_to_known_queues=True).enqueue_job_ids(
                        [str(x) for x in jobs_by_queue[queue]])

        print stats

        return stats
Ejemplo n.º 27
0
 def run(self, params):
     """Re-register every queue found in the Redis known-queues set."""
     key = "%s:known_queues" % get_current_config()["redis_prefix"]
     for queue_name in connections.redis.smembers(key):
         Queue(queue_name).add_to_known_queues()
Ejemplo n.º 28
0
# Load settings.py (sibling of this file) into the Flask config.
AppConfig(app, os.path.abspath(os.path.join(__file__, '../settings.py')))
Bootstrap(app)

app.config.update({
  "DEBUG": DEBUG
})

# MongoDB + python-social-auth wiring.
db = mongoengine.MongoEngine(app)
app.register_blueprint(social_auth)
init_social(app, db)
app.context_processor(backends)

login_manager = login.LoginManager()
login_manager.init_app(app)

# Make sure an MRQ context exists so tasks can be queued from the web app.
if not get_current_config():
    setup_context()


@app.route("/data/facebook/albums")
@login.login_required
def data_facebook_albums():
    # Serve the logged-in user's Facebook albums as JSON.
    return json.dumps(g.user.get_facebook_albums())


@app.route("/create_job", methods=["POST"])
# @login.login_required
def create_job():
    taskpath = request.form['path']
    taskparams = json.loads(request.form['params'])
Ejemplo n.º 29
0
 def run(self, params):
     """JSON-encode and return the entire current config."""
     current = get_current_config()
     return json.dumps(current)
Ejemplo n.º 30
0
 def redis_orchestrator_lock_key(self, worker_group):
     """Global Redis key guaranteeing a single agent orchestrator runs
     at a time for *worker_group*."""
     prefix = get_current_config()["redis_prefix"]
     return "%s:orchestratorlock:%s" % (prefix, worker_group)
Ejemplo n.º 31
0
 def run(self, params):
     """Serialize the full current configuration to a JSON string."""
     cfg = get_current_config()
     return json.dumps(cfg)
Ejemplo n.º 32
0
 def redis_queuestats_key(self):
     """Global Redis HSET key under which queue statistics are stored."""
     prefix = get_current_config()["redis_prefix"]
     return "%s:queuestats" % (prefix)
Ejemplo n.º 33
0
from __future__ import print_function
from mrq.context import setup_context, run_task, get_current_config

# Bootstrap the MRQ context (config, connections) for this script.
setup_context()

# Run a task synchronously in the current process and show its result.
result = run_task("tests.tasks.general.Add", {"a": 41, "b": 1})
print(result)

# The config is available once setup_context() has run.
print(get_current_config()["name"])
Ejemplo n.º 34
0
 def run(self, params):
   """Hand back the live config mapping for the current context."""
   cfg = get_current_config()
   return cfg