Example #1
def main():

    parser = argparse.ArgumentParser(description='Runs a task')

    cfg = config.get_config(parser=parser, config_type="run", sources=("file", "env", "args"))
    cfg["is_cli"] = True
    set_current_config(cfg)

    if len(cfg["taskargs"]) == 1:
        params = json.loads(cfg["taskargs"][0])  # pylint: disable=no-member
    else:
        params = {}

        # mrq-run taskpath a 1 b 2 => {"a": "1", "b": "2"}
        for group in utils.group_iter(cfg["taskargs"], n=2):
            if len(group) != 2:
                print("Number of arguments wasn't even")
                sys.exit(1)
            params[group[0]] = group[1]

    if cfg["queue"]:
        ret = queue_job(cfg["taskpath"], params, queue=cfg["queue"])
        print(ret)
    else:
        worker_class = load_class_by_path(cfg["worker_class"])
        job = worker_class.job_class(None)
        job.set_data({
            "path": cfg["taskpath"],
            "params": params,
            "queue": cfg["queue"]
        })
        job.datestarted = datetime.datetime.utcnow()
        set_current_job(job)
        ret = job.perform()
        print(json_stdlib.dumps(ret, cls=MongoJSONEncoder))  # pylint: disable=no-member
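The key/value fallback above (mrq-run taskpath a 1 b 2 => {"a": "1", "b": "2"}) depends on utils.group_iter chunking the argument list into pairs. A minimal self-contained sketch of that pairing logic, with pair_args as a hypothetical stand-in rather than MRQ's own helper:

import itertools

def pair_args(taskargs):
    # Pair up consecutive arguments: ["a", "1", "b", "2"] -> {"a": "1", "b": "2"}
    params = {}
    it = iter(taskargs)
    for key, value in itertools.zip_longest(it, it):
        if value is None:
            raise SystemExit("Number of arguments wasn't even")
        params[key] = value
    return params

assert pair_args(["a", "1", "b", "2"]) == {"a": "1", "b": "2"}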
Example #2
def main():

  parser = argparse.ArgumentParser(description='Runs a task')

  cfg = config.get_config(parser=parser, config_type="run")
  cfg["is_cli"] = True
  set_current_config(cfg)
  log.info(cfg)
  if len(cfg["taskargs"]) == 1:
    params = json.loads(cfg["taskargs"][0])
  else:
    params = {}

    # mrq-run taskpath a 1 b 2 => {"a": "1", "b": "2"}
    for group in utils.group_iter(cfg["taskargs"], n=2):
      if len(group) != 2:
        print "Number of arguments wasn't even"
        sys.exit(1)
      params[group[0]] = group[1]

  if cfg["async"]:
    ret = queue.send_task(cfg["taskpath"], params, sync=False, queue=cfg["queue"])
    print(ret)
  else:
    worker_class = load_class_by_path(cfg["worker_class"])
    job = worker_class.job_class(None)
    job.data = {
      "path": cfg["taskpath"],
      "params": params,
      "queue": cfg["queue"]
    }
    job.datestarted = datetime.datetime.utcnow()
    set_current_job(job)
    ret = job.perform()
    print(json.dumps(ret))
Example #3
def main():

    parser = argparse.ArgumentParser(description='Start an MRQ worker')

    cfg = config.get_config(parser=parser,
                            config_type="worker",
                            sources=("file", "env", "args"))

    set_current_config(cfg)
    set_logger_config()

    # If we are launching with a --processes option and without MRQ_IS_SUBPROCESS, we are a manager process
    if cfg["processes"] > 0 and not os.environ.get("MRQ_IS_SUBPROCESS"):

        from mrq.supervisor import Supervisor

        command = " ".join(map(pipes.quote, sys.argv))
        w = Supervisor(command, numprocs=cfg["processes"])
        w.work()
        sys.exit(w.exitcode)

    # If not, start an actual worker
    else:

        worker_class = load_class_by_path(cfg["worker_class"])
        w = worker_class()
        w.work()
        sys.exit(w.exitcode)
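The MRQ_IS_SUBPROCESS check above works because the manager re-launches its own command line once per process, marking each child with that variable so it takes the worker branch. A hedged sketch of that pattern (the real Supervisor internals are not shown in this example):

import os
import subprocess

def spawn_children(command, numprocs):
    # Each child sees MRQ_IS_SUBPROCESS and therefore starts a plain worker.
    env = dict(os.environ, MRQ_IS_SUBPROCESS="1")
    procs = [subprocess.Popen(command, shell=True, env=env) for _ in range(numprocs)]
    return [p.wait() for p in procs]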
Example #4
def main():
    set_current_config(get_config())
    print "======== START ========="
    add_num = 0

    print 'all:', add_num
    print "======== END ========="
Example #5
def test_pause_subqueue(worker):

    # set config in current context in order to have a subqueue delimiter
    set_current_config(get_config(config_type="worker"))

    worker.start(
        queues="high high/",
        flags="--subqueues_refresh_interval=1 --paused_queues_refresh_interval=1")

    Queue("high").pause()

    assert Queue("high/").is_paused()

    # wait for the paused_queues list to be refreshed
    time.sleep(2)

    job_id1 = send_task("tests.tasks.general.MongoInsert", {"a": 41},
                        queue="high")

    job_id2 = send_task("tests.tasks.general.MongoInsert", {"a": 43},
                        queue="high/subqueue")

    # wait a bit to make sure the jobs status will still be queued
    time.sleep(5)

    job1 = Job(job_id1).fetch().data
    job2 = Job(job_id2).fetch().data

    assert job1["status"] == "queued"
    assert job2["status"] == "queued"

    assert worker.mongodb_jobs.tests_inserts.count() == 0

    Queue("high/").resume()

    Job(job_id1).wait(poll_interval=0.01)

    Job(job_id2).wait(poll_interval=0.01)

    job1 = Job(job_id1).fetch().data
    job2 = Job(job_id2).fetch().data

    assert job1["status"] == "success"
    assert job1["result"] == {"a": 41}

    assert job2["status"] == "success"
    assert job2["result"] == {"a": 43}

    assert worker.mongodb_jobs.tests_inserts.count() == 2

    worker.stop()
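The test assumes a task class at tests.tasks.general.MongoInsert. A plausible minimal version, assuming MRQ's Task base class and the connections context (the tests_inserts collection name matches the assertions above):

from mrq.task import Task
from mrq.context import connections

class MongoInsert(Task):
    def run(self, params):
        # Insert a copy into the collection the test counts, then return the
        # original params so that job["result"] equals what was sent.
        connections.mongodb_jobs.tests_inserts.insert_one(dict(params))
        return params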
Example #6
def main():

    parser = argparse.ArgumentParser(description='Start an MRQ agent')

    cfg = config.get_config(parser=parser, config_type="agent", sources=("file", "env", "args"))

    set_current_config(cfg)

    agent = Agent()

    agent.work()

    sys.exit(agent.exitcode)
Example #7
def test_pause_subqueue(worker):

    # set config in current context in order to have a subqueue delimiter
    set_current_config(get_config(config_type="worker"))

    worker.start(queues="high high/", flags="--subqueues_refresh_interval=1 --paused_queues_refresh_interval=1")

    Queue("high").pause()

    assert Queue("high/").is_paused()

    # wait for the paused_queues list to be refreshed
    time.sleep(2)

    job_id1 = send_task(
        "tests.tasks.general.MongoInsert", {"a": 41},
        queue="high")

    job_id2 = send_task(
        "tests.tasks.general.MongoInsert", {"a": 43},
        queue="high/subqueue")

    # wait a bit to make sure the jobs status will still be queued
    time.sleep(5)

    job1 = Job(job_id1).fetch().data
    job2 = Job(job_id2).fetch().data

    assert job1["status"] == "queued"
    assert job2["status"] == "queued"

    assert worker.mongodb_jobs.tests_inserts.count() == 0

    Queue("high/").resume()

    Job(job_id1).wait(poll_interval=0.01)

    Job(job_id2).wait(poll_interval=0.01)

    job1 = Job(job_id1).fetch().data
    job2 = Job(job_id2).fetch().data

    assert job1["status"] == "success"
    assert job1["result"] == {"a": 41}

    assert job2["status"] == "success"
    assert job2["result"] == {"a": 43}

    assert worker.mongodb_jobs.tests_inserts.count() == 2

    worker.stop()
Example #8
import os
import sys
import psutil
import time
import re
import json
import urllib2

sys.path.append(os.getcwd())

from mrq.job import Job
from mrq.queue import send_tasks, send_raw_tasks, Queue
from mrq.config import get_config
from mrq.utils import wait_for_net_service
from mrq.context import connections, set_current_config

set_current_config(get_config(sources=("env",)))  # one-element tuple, not a bare string

os.system("rm -rf dump.rdb")


class ProcessFixture(object):
  def __init__(self, request, cmdline=None, wait_port=None, quiet=False):
    self.request = request
    self.cmdline = cmdline
    self.process = None
    self.wait_port = wait_port
    self.quiet = quiet
    self.stopped = False

    self.request.addfinalizer(self.stop)
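Since ProcessFixture registers its own stop method as a pytest finalizer, a conftest can hand instances out per test. A hedged sketch of such wiring (the mrq-worker command line is illustrative):

import pytest

@pytest.fixture
def worker_process(request):
    # The finalizer registered in __init__ stops the process after the test.
    return ProcessFixture(request, cmdline="mrq-worker default", quiet=True)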
Example #9
import os
import argparse

from flask import Flask, render_template
from future.utils import iteritems  # py2/py3 compat helper (assumed import source)

from mrq.queue import Queue
from mrq.context import connections, set_current_config, get_current_config
from mrq.job import queue_job
from mrq.config import get_config

from mrq.dashboard.utils import jsonify, requires_auth

CURRENT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))

parser = argparse.ArgumentParser(description='Start the MRQ dashboard')

cfg = get_config(parser=parser,
                 config_type="dashboard",
                 sources=("file", "env", "args"))
set_current_config(cfg)

app = Flask("dashboard",
            static_folder=os.path.join(CURRENT_DIRECTORY, "static"),
            template_folder=os.path.join(CURRENT_DIRECTORY, "templates"))

WHITELISTED_MRQ_CONFIG_KEYS = ["dashboard_autolink_repositories"]


@app.route('/')
@requires_auth
def root():
    return render_template("index.html",
                           MRQ_CONFIG={
                               k: v
                               for k, v in iteritems(cfg)
                               if k in WHITELISTED_MRQ_CONFIG_KEYS
                           })
Example #10
File: app.py Project: benjisg/mrq
import os
import sys
import argparse

from flask import Flask, render_template

sys.path.insert(0, os.getcwd())

from mrq.queue import Queue
from mrq.context import connections, set_current_config, get_current_config
from mrq.job import queue_job
from mrq.config import get_config

from mrq.dashboard.utils import jsonify, requires_auth

CURRENT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))

parser = argparse.ArgumentParser(description='Start the MRQ dashboard')

cfg = get_config(parser=parser, config_type="dashboard", sources=("file", "env", "args"))
set_current_config(cfg)

app = Flask(
    "dashboard",
    static_folder=os.path.join(CURRENT_DIRECTORY, "static"),
    template_folder=os.path.join(CURRENT_DIRECTORY, "templates")
)

WHITELISTED_MRQ_CONFIG_KEYS = ["dashboard_autolink_repositories"]


@app.route('/')
@requires_auth
def root():
    return render_template("index.html", MRQ_CONFIG={
        k: v for k, v in cfg.items() if k in WHITELISTED_MRQ_CONFIG_KEYS
    })
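To actually serve the app, the dashboard's entry point presumably calls app.run() with the configured bind address. A sketch assuming dashboard_ip and dashboard_port config keys (names not confirmed by this snippet):

def main():
    # Assumed config keys; adjust to whatever get_config() actually provides.
    app.run(host=cfg.get("dashboard_ip", "0.0.0.0"),
            port=int(cfg.get("dashboard_port", 5555)))

if __name__ == "__main__":
    main()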
Example #11
def main():

    parser = argparse.ArgumentParser(description='Start an MRQ worker')

    cfg = config.get_config(parser=parser,
                            config_type="worker",
                            sources=("file", "env", "args"))

    # If we are launching with a --processes option and without the SUPERVISOR_ENABLED env
    # then we should just call supervisord.
    if cfg["processes"] > 0 and not os.environ.get("SUPERVISOR_ENABLED"):

        # We wouldn't need to do all that if supervisord supported environment
        # variables in all its config fields!
        with open(cfg["supervisord_template"], "r") as f:
            conf = f.read()

        fh, path = tempfile.mkstemp(prefix="mrqsupervisordconfig")
        f = os.fdopen(fh, "w")

        # We basically relaunch ourselves, but the config will contain the
        # MRQ_SUPERVISORD_ISWORKER env.
        conf = conf.replace("{{ SUPERVISORD_COMMAND }}", " ".join(sys.argv))
        conf = conf.replace("{{ SUPERVISORD_PROCESSES }}",
                            str(cfg["processes"]))

        f.write(conf)
        f.close()

        try:

            # start_new_session=True avoids sending the current process'
            # signals to the child.
            process = subprocess.Popen(["supervisord", "-c", path],
                                       start_new_session=True)

            def sigint_handler(signum, frame):  # pylint: disable=unused-argument

                # At this point we need to send SIGINT to all workers. Unfortunately supervisord
                # doesn't support this, so we have to find all the children pids and send them the
                # signal ourselves :-/
                # https://github.com/Supervisor/supervisor/issues/179
                #
                psutil_process = psutil.Process(process.pid)
                # psutil >= 3.0 renamed get_children() to children()
                worker_processes = psutil_process.children(recursive=False)

                if len(worker_processes) == 0:
                    return process.send_signal(signal.SIGTERM)

                for child_process in worker_processes:
                    child_process.send_signal(signal.SIGINT)

                # Second time sigint is used, we should terminate supervisord itself which
                # will send SIGTERM to all the processes anyway.
                signal.signal(signal.SIGINT, sigterm_handler)

                # Wait for all the children to finish
                for child_process in worker_processes:
                    child_process.wait()

                # Then stop supervisord itself.
                process.send_signal(signal.SIGTERM)

            def sigterm_handler(signum, frame):  # pylint: disable=unused-argument
                process.send_signal(signal.SIGTERM)

            signal.signal(signal.SIGINT, sigint_handler)
            signal.signal(signal.SIGTERM, sigterm_handler)

            process.wait()

        finally:
            os.remove(path)

    # If not, start the actual worker
    else:

        worker_class = load_class_by_path(cfg["worker_class"])

        set_current_config(cfg)

        w = worker_class()

        exitcode = w.work_loop()

        sys.exit(exitcode)
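The {{ SUPERVISORD_COMMAND }} and {{ SUPERVISORD_PROCESSES }} placeholders imply a supervisord template shipped with the project. A hedged sketch of what such a template and the substitution step might look like; the template body below is illustrative, not MRQ's actual file:

TEMPLATE = """\
[supervisord]
nodaemon=true

[program:mrqworker]
command={{ SUPERVISORD_COMMAND }}
numprocs={{ SUPERVISORD_PROCESSES }}
process_name=%(program_name)s_%(process_num)02d
environment=MRQ_SUPERVISORD_ISWORKER="1"
"""

conf = TEMPLATE.replace("{{ SUPERVISORD_COMMAND }}", "mrq-worker default")
conf = conf.replace("{{ SUPERVISORD_PROCESSES }}", "4")
print(conf)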
Example #12
import os
import sys
import psutil
import time
import re
import json
import urllib.request, urllib.error, urllib.parse

sys.path.append(os.getcwd())

from mrq.job import Job, queue_raw_jobs, queue_jobs
from mrq.queue import Queue
from mrq.config import get_config
from mrq.utils import wait_for_net_service
from mrq.context import connections, set_current_config

set_current_config(get_config(sources=("env",)))  # one-element tuple, not a bare string

os.system("rm -rf dump.rdb")

PYTHON_BIN = "python"
if os.environ.get("PYTHON_BIN"):
    PYTHON_BIN = os.environ["PYTHON_BIN"]


class ProcessFixture(object):
    def __init__(self, request, cmdline=None, wait_port=None, quiet=False):
        self.request = request
        self.cmdline = cmdline
        self.process = None
        self.wait_port = wait_port
        self.quiet = quiet
        self.stopped = False

        self.request.addfinalizer(self.stop)
Example #13
""" MRQ """
from gevent import monkey
monkey.patch_all()  # must run before anything that creates sockets or threads

import os
import sys

from mrq.context import set_current_config

from_file = {}
config_file = os.path.join(os.getcwd(), "mrq-config.py")
sys.path.insert(0, os.path.dirname(config_file))
config_module = __import__(os.path.basename(config_file.replace(".py", "")))
sys.path.pop(0)
for k, v in config_module.__dict__.items():
    # We only keep variables starting with an uppercase character.
    if k[0].isupper():
        from_file[k.lower()] = v

set_current_config(from_file)
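Given that the loader keeps only uppercase module-level names and lowercases them, a matching mrq-config.py might look like this (values are illustrative):

# mrq-config.py -- picked up from the current working directory.
# Only uppercase names survive the filter; they become
# {"mongodb": ..., "redis": ..., "name": ...} in the config dict.
MONGODB = "mongodb://127.0.0.1:27017/mrq"
REDIS = "redis://127.0.0.1:6379"
NAME = "local-worker"

_helper = "lowercase names like this one are ignored"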
Example #14
def main():

    parser = argparse.ArgumentParser(description='Start an MRQ worker')

    cfg = config.get_config(parser=parser, config_type="worker", sources=("file", "env", "args"))

    # If we are launching with a --processes option and without the SUPERVISOR_ENABLED env
    # then we should just call supervisord.
    if cfg["processes"] > 0 and not os.environ.get("SUPERVISOR_ENABLED"):

        # We wouldn't need to do all that if supervisord supported environment
        # variables in all its config fields!
        with open(cfg["supervisord_template"], "r") as f:
            conf = f.read()

        fh, path = tempfile.mkstemp(prefix="mrqsupervisordconfig")
        f = os.fdopen(fh, "w")

        # We basically relaunch ourselves, but the config will contain the
        # MRQ_SUPERVISORD_ISWORKER env.
        conf = conf.replace("{{ SUPERVISORD_COMMAND }}", " ".join(sys.argv))
        conf = conf.replace(
            "{{ SUPERVISORD_PROCESSES }}", str(cfg["processes"]))

        f.write(conf)
        f.close()

        try:

            # start_new_session=True avoids sending the current process'
            # signals to the child.
            process = subprocess.Popen(
                ["supervisord", "-c", path], start_new_session=True)

            def sigint_handler(signum, frame):  # pylint: disable=unused-argument

                # At this point we need to send SIGINT to all workers. Unfortunately supervisord
                # doesn't support this, so we have to find all the children pids and send them the
                # signal ourselves :-/
                # https://github.com/Supervisor/supervisor/issues/179
                #
                psutil_process = psutil.Process(process.pid)
                # psutil >= 3.0 renamed get_children() to children()
                worker_processes = psutil_process.children(recursive=False)

                if len(worker_processes) == 0:
                    return process.send_signal(signal.SIGTERM)

                for child_process in worker_processes:
                    child_process.send_signal(signal.SIGINT)

                # Second time sigint is used, we should terminate supervisord itself which
                # will send SIGTERM to all the processes anyway.
                signal.signal(signal.SIGINT, sigterm_handler)

                # Wait for all the children to finish
                for child_process in worker_processes:
                    child_process.wait()

                # Then stop supervisord itself.
                process.send_signal(signal.SIGTERM)

            def sigterm_handler(signum, frame):  # pylint: disable=unused-argument
                process.send_signal(signal.SIGTERM)

            signal.signal(signal.SIGINT, sigint_handler)
            signal.signal(signal.SIGTERM, sigterm_handler)

            process.wait()

        finally:
            os.remove(path)

    # If not, start the actual worker
    else:

        worker_class = load_class_by_path(cfg["worker_class"])

        set_current_config(cfg)

        w = worker_class()

        exitcode = w.work()

        sys.exit(exitcode)