def main(): parser = argparse.ArgumentParser(description='Runs a task') cfg = config.get_config(parser=parser, config_type="run", sources=("file", "env", "args")) cfg["is_cli"] = True set_current_config(cfg) if len(cfg["taskargs"]) == 1: params = json.loads(cfg["taskargs"][0]) # pylint: disable=no-member else: params = {} # mrq-run taskpath a 1 b 2 => {"a": "1", "b": "2"} for group in utils.group_iter(cfg["taskargs"], n=2): if len(group) != 2: print("Number of arguments wasn't even") sys.exit(1) params[group[0]] = group[1] if cfg["queue"]: ret = queue_job(cfg["taskpath"], params, queue=cfg["queue"]) print(ret) else: worker_class = load_class_by_path(cfg["worker_class"]) job = worker_class.job_class(None) job.set_data({ "path": cfg["taskpath"], "params": params, "queue": cfg["queue"] }) job.datestarted = datetime.datetime.utcnow() set_current_job(job) ret = job.perform() print(json_stdlib.dumps(ret, cls=MongoJSONEncoder)) # pylint: disable=no-member
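# Hedged sketch: the pair-wise argument parsing above relies on utils.group_iter
# chunking an iterable into groups of n items. This stand-in is an assumption about
# its behaviour, not mrq's actual helper; it only illustrates what main() expects.
def group_iter_sketch(iterable, n=2):
    """Yield successive tuples of up to n items from iterable."""
    chunk = []
    for item in iterable:
        chunk.append(item)
        if len(chunk) == n:
            yield tuple(chunk)
            chunk = []
    if chunk:
        # A trailing, incomplete group is still yielded, which is what lets
        # main() detect an odd number of arguments and exit with an error.
        yield tuple(chunk)

# Example: list(group_iter_sketch(["a", "1", "b", "2"])) == [("a", "1"), ("b", "2")]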
def main():
    parser = argparse.ArgumentParser(description='Start a MRQ worker')

    cfg = config.get_config(parser=parser, config_type="worker", sources=("file", "env", "args"))
    set_current_config(cfg)
    set_logger_config()

    # If we are launching with a --processes option and without MRQ_IS_SUBPROCESS,
    # we are a manager process
    if cfg["processes"] > 0 and not os.environ.get("MRQ_IS_SUBPROCESS"):
        from mrq.supervisor import Supervisor

        command = " ".join(map(pipes.quote, sys.argv))
        w = Supervisor(command, numprocs=cfg["processes"])
        w.work()
        sys.exit(w.exitcode)

    # If not, start an actual worker
    else:
        worker_class = load_class_by_path(cfg["worker_class"])
        w = worker_class()
        w.work()
        sys.exit(w.exitcode)
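# Hedged illustration of the command rebuilt above: pipes.quote (the same function
# as shlex.quote on Python 3) shell-escapes each argv element before the Supervisor
# re-executes it for every child process. The argv below is hypothetical.
import pipes

argv = ["mrq-worker", "--processes", "4", "high low"]
command = " ".join(map(pipes.quote, argv))
# command == "mrq-worker --processes 4 'high low'"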
def main(): parser = argparse.ArgumentParser(description='Runs a task') cfg = config.get_config(parser=parser, config_type="run") cfg["is_cli"] = True set_current_config(cfg) log.info(cfg) if len(cfg["taskargs"]) == 1: params = json.loads(cfg["taskargs"][0]) else: params = {} # mrq-run taskpath a 1 b 2 => {"a": "1", "b": "2"} for group in utils.group_iter(cfg["taskargs"], n=2): if len(group) != 2: print "Number of arguments wasn't even" sys.exit(1) params[group[0]] = group[1] if cfg["async"]: ret = queue.send_task(cfg["taskpath"], params, sync=False, queue=cfg["queue"]) print ret else: worker_class = load_class_by_path(cfg["worker_class"]) job = worker_class.job_class(None) job.data = { "path": cfg["taskpath"], "params": params, "queue": cfg["queue"] } job.datestarted = datetime.datetime.utcnow() set_current_job(job) ret = job.perform() print json.dumps(ret)
def main(): parser = argparse.ArgumentParser(description='Start a RQ worker') cfg = config.get_config(parser=parser, config_type="worker", sources=("file", "env", "args")) # If we are launching with a --processes option and without the SUPERVISOR_ENABLED env # then we should just call supervisord. if cfg["processes"] > 0 and not os.environ.get("SUPERVISOR_ENABLED"): # We wouldn't need to do all that if supervisord supported environment # variables in all its config fields! with open(cfg["supervisord_template"], "r") as f: conf = f.read() fh, path = tempfile.mkstemp(prefix="mrqsupervisordconfig") f = os.fdopen(fh, "w") # We basically relaunch ourselves, but the config will contain the # MRQ_SUPERVISORD_ISWORKER env. conf = conf.replace("{{ SUPERVISORD_COMMAND }}", " ".join(sys.argv)) conf = conf.replace("{{ SUPERVISORD_PROCESSES }}", str(cfg["processes"])) f.write(conf) f.close() try: # start_new_session=True avoids sending the current process' # signals to the child. process = subprocess.Popen(["supervisord", "-c", path], start_new_session=True) def sigint_handler(signum, frame): # pylint: disable=unused-argument # At this point we need to send SIGINT to all workers. Unfortunately supervisord # doesn't support this, so we have to find all the children pids and send them the # signal ourselves :-/ # https://github.com/Supervisor/supervisor/issues/179 # psutil_process = psutil.Process(process.pid) worker_processes = psutil_process.get_children(recursive=False) if len(worker_processes) == 0: return process.send_signal(signal.SIGTERM) for child_process in worker_processes: child_process.send_signal(signal.SIGINT) # Second time sigint is used, we should terminate supervisord itself which # will send SIGTERM to all the processes anyway. signal.signal(signal.SIGINT, sigterm_handler) # Wait for all the childs to finish for child_process in worker_processes: child_process.wait() # Then stop supervisord itself. process.send_signal(signal.SIGTERM) def sigterm_handler(signum, frame): # pylint: disable=unused-argument process.send_signal(signal.SIGTERM) signal.signal(signal.SIGINT, sigint_handler) signal.signal(signal.SIGTERM, sigterm_handler) process.wait() finally: os.remove(path) # If not, start the actual worker else: worker_class = load_class_by_path(cfg["worker_class"]) set_current_config(cfg) w = worker_class() exitcode = w.work_loop() sys.exit(exitcode)
def main(): parser = argparse.ArgumentParser(description='Start a RQ worker') cfg = config.get_config(parser=parser, config_type="worker") # If we are launching with a --processes option and without the SUPERVISOR_ENABLED env # then we should just call supervisord. if cfg["processes"] > 0 and not os.environ.get("SUPERVISOR_ENABLED"): # We wouldn't need to do all that if supervisord supported environment variables in all its config fields! with open(cfg["supervisord_template"], "r") as f: conf = f.read() fh, path = tempfile.mkstemp(prefix="mrqsupervisordconfig") f = os.fdopen(fh, "w") # We basically relaunch ourselves, but the config will contain the MRQ_SUPERVISORD_ISWORKER env. conf = conf.replace("{{ SUPERVISORD_COMMAND }}", " ".join(sys.argv)) conf = conf.replace("{{ SUPERVISORD_PROCESSES }}", str(cfg["processes"])) f.write(conf) f.close() try: # start_new_session=True avoids sending the current process' signals to the child. process = subprocess.Popen(["supervisord", "-c", path], start_new_session=True) def sigint_handler(signum, frame): # At this point we need to send SIGINT to all workers. Unfortunately supervisord # doesn't support this, so we have to find all the children pids and send them the # signal ourselves :-/ # https://github.com/Supervisor/supervisor/issues/179 # psutil_process = psutil.Process(process.pid) worker_processes = psutil_process.get_children(recursive=False) if len(worker_processes) == 0: return process.send_signal(signal.SIGTERM) for child_process in worker_processes: child_process.send_signal(signal.SIGINT) # Second time sigint is used, we should terminate supervisord itself which # will send SIGTERM to all the processes anyway. signal.signal(signal.SIGINT, sigterm_handler) # Wait for all the childs to finish for child_process in worker_processes: child_process.wait() # Then stop supervisord itself. process.send_signal(signal.SIGTERM) def sigterm_handler(signum, frame): process.send_signal(signal.SIGTERM) signal.signal(signal.SIGINT, sigint_handler) signal.signal(signal.SIGTERM, sigterm_handler) process.wait() finally: os.remove(path) # If not, start the actual worker else: worker_class = load_class_by_path(cfg["worker_class"]) w = worker_class(cfg) exitcode = w.work_loop() sys.exit(exitcode)
def run_task(path, params, **kwargs):
    task_class = load_class_by_path(path)
    return task_class().run_wrapped(params, **kwargs)
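# Hedged usage sketch: run_task imports the task class from its dotted path and
# executes it synchronously in-process, without queueing. The path and params
# below are hypothetical examples, not tasks shipped with the project.
result = run_task("tasks.general.Add", {"a": 1, "b": 2})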