def prepare_arguments(self, parser):
    """Register daemon options plus the detach-wrapper specific flags."""
    daemon_options(parser, default_pidfile="celeryd.pid")
    # Options specific to the detached-worker wrapper.
    for args, kwargs in (
        (("--workdir",), {"default": None, "dest": "working_directory"}),
        (("-n", "--hostname"), {}),
        (("--fake",), {"default": False, "action": "store_true",
                       "dest": "fake",
                       "help": "Don't fork (for debugging purposes)"}),
    ):
        parser.add_option(*args, **kwargs)
def prepare_arguments(self, parser):
    """Attach daemonization options and the --workdir/--fake flags."""
    daemon_options(parser, default_pidfile='celeryd.pid')
    add = parser.add_option  # shorthand; all options go on the same parser
    add('--workdir', default=None, dest='working_directory')
    add('--fake', default=False, action='store_true', dest='fake',
        help="Don't fork (for debugging purposes)")
def prepare_arguments(self, parser):
    """Attach daemon options plus workdir/hostname/fake flags to *parser*."""
    daemon_options(parser, default_pidfile='celeryd.pid')
    add = parser.add_option
    add('--workdir', default=None, dest='working_directory')
    add('-n', '--hostname')
    add('--fake', default=False, action='store_true', dest='fake',
        help="Don't fork (for debugging purposes)")
def prepare_arguments(self, parser):
    """Add event-command options (dump/camera/daemon) to *parser*."""
    add = parser.add_option
    add("-d", "--dump", action="store_true")
    add("-c", "--camera")
    add("--detach", action="store_true")
    add("-F", "--frequency", "--freq", type="float", default=1.0)
    add("-r", "--maxrate")
    add("-l", "--loglevel", default="INFO")
    daemon_options(parser, default_pidfile="celeryev.pid")
    # App-registered extra options come last.
    parser.add_options(self.app.user_options["events"])
def prepare_arguments(self, parser):
    """Add beat scheduler options, daemon options and app user options."""
    conf = self.app.conf  # renamed local (was ``c``)
    add = parser.add_option
    add('--detach', action='store_true')
    add('-s', '--schedule', default=conf.beat_schedule_filename)
    add('--max-interval', type='float')
    add('-S', '--scheduler')
    add('-l', '--loglevel', default='WARN')
    daemon_options(parser, default_pidfile='celerybeat.pid')
    parser.add_options(self.app.user_options['beat'])
def add_arguments(self, parser):
    """Register daemon arguments plus workdir/hostname/fake flags."""
    daemon_options(parser, default_pidfile='celeryd.pid')
    add = parser.add_argument
    add('--workdir', default=None)
    add('-n', '--hostname')
    add('--fake', action='store_true', default=False,
        help="Don't fork (for debugging purposes)")
def prepare_arguments(self, parser):
    """Register event-command options, daemon options and user options."""
    # Option specs applied in declaration order.
    for args, kwargs in (
        (('-d', '--dump'), {'action': 'store_true'}),
        (('-c', '--camera'), {}),
        (('--detach',), {'action': 'store_true'}),
        (('-F', '--frequency', '--freq'), {'type': 'float', 'default': 1.0}),
        (('-r', '--maxrate'), {}),
        (('-l', '--loglevel'), {'default': 'INFO'}),
    ):
        parser.add_option(*args, **kwargs)
    daemon_options(parser, default_pidfile='celeryev.pid')
    parser.add_options(self.app.user_options['events'])
def add_arguments(self, parser):
    """Add daemonization plus detach-wrapper arguments to *parser*."""
    daemon_options(parser, default_pidfile="celeryd.pid")
    register = parser.add_argument
    register("--workdir", default=None)
    register("-n", "--hostname")
    register("--fake", action="store_true", default=False,
             help="Don't fork (for debugging purposes)")
def get_options(self):
    """Return beat command options followed by the shared daemon options."""
    conf = self.app.conf
    # Help text for -s/--schedule includes the configured default.
    schedule_help = (
        "Path to the schedule database. The extension "
        "'.db' will be appended to the filename. Default: %s" % (
            conf.CELERYBEAT_SCHEDULE_FILENAME, ))
    options = [
        Option('--detach', default=False, action="store_true", dest="detach",
               help="Detach and run in the background."),
        Option('-s', '--schedule', default=conf.CELERYBEAT_SCHEDULE_FILENAME,
               action="store", dest="schedule", help=schedule_help),
        Option('--max-interval', default=3600.0, type="float",
               dest="max_interval",
               help="Max. seconds to sleep between schedule iterations."),
        Option('-S', '--scheduler', default=None, action="store",
               dest="scheduler_cls",
               help="Scheduler class. Default is "
                    "celery.beat.PersistentScheduler"),
        Option('-l', '--loglevel', default=conf.CELERYBEAT_LOG_LEVEL,
               action="store", dest="loglevel",
               help="Loglevel. One of DEBUG/INFO/WARNING/ERROR/CRITICAL."),
    ]
    return tuple(options) + daemon_options(
        default_pidfile="celerybeat.pid",
        default_logfile=conf.CELERYBEAT_LOG_FILE)
def add_arguments(self, parser):
    """Populate *parser* with beat, daemon and user-supplied options."""
    conf = self.app.conf
    group = parser.add_argument_group('Beat Options')
    group.add_argument('--detach', action='store_true', default=False)
    group.add_argument('-s', '--schedule',
                       default=conf.beat_schedule_filename)
    group.add_argument('--max-interval', type=float)
    group.add_argument('-S', '--scheduler', default=conf.beat_scheduler)
    group.add_argument('-l', '--loglevel', default='WARN')
    daemon_options(parser, default_pidfile='celerybeat.pid')
    # Compat options registered by the application, if any.
    extra = self.app.user_options['beat']
    if extra:
        self.add_compat_options(
            parser.add_argument_group('User Options'), extra)
def add_arguments(self, parser):
    """Add beat options (no scheduler default), daemon and user options."""
    conf = self.app.conf
    beat_group = parser.add_argument_group('Beat Options')
    beat_group.add_argument('--detach', action='store_true', default=False)
    beat_group.add_argument('-s', '--schedule',
                            default=conf.beat_schedule_filename)
    beat_group.add_argument('--max-interval', type=float)
    beat_group.add_argument('-S', '--scheduler')
    beat_group.add_argument('-l', '--loglevel', default='WARN')
    daemon_options(parser, default_pidfile='celerybeat.pid')
    extra = self.app.user_options['beat']
    if extra:
        self.add_compat_options(
            parser.add_argument_group('User Options'), extra)
def get_options(self):
    """Worker command-line options + daemon options + app user options."""
    conf = self.app.conf
    options = [
        Option('-c', '--concurrency', default=conf.CELERYD_CONCURRENCY,
               type='int'),
        Option('-P', '--pool', default=conf.CELERYD_POOL, dest='pool_cls'),
        Option('--purge', '--discard', default=False, action='store_true'),
        Option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL),
        Option('-n', '--hostname'),
        Option('-B', '--beat', action='store_true'),
        Option('-s', '--schedule', dest='schedule_filename',
               default=conf.CELERYBEAT_SCHEDULE_FILENAME),
        Option('--scheduler', dest='scheduler_cls'),
        Option('-S', '--statedb', default=conf.CELERYD_STATE_DB,
               dest='state_db'),
        Option('-E', '--events', default=conf.CELERY_SEND_EVENTS,
               action='store_true', dest='send_events'),
        Option('--time-limit', type='float', dest='task_time_limit',
               default=conf.CELERYD_TASK_TIME_LIMIT),
        Option('--soft-time-limit', dest='task_soft_time_limit',
               default=conf.CELERYD_TASK_SOFT_TIME_LIMIT, type='float'),
        Option('--maxtasksperchild', dest='max_tasks_per_child',
               default=conf.CELERYD_MAX_TASKS_PER_CHILD, type='int'),
        Option('--queues', '-Q', default=[]),
        Option('--include', '-I', default=[]),
        Option('--autoscale'),
        Option('--autoreload', action='store_true'),
        Option('--no-execv', action='store_true', default=False),
        Option('-D', '--detach', action='store_true'),
    ]
    return (tuple(options) + daemon_options()
            + tuple(self.app.user_options['worker']))
def get_options(self):
    """Worker options (incl. gossip/mingle/heartbeat toggles) + daemon
    options + app user options."""
    conf = self.app.conf
    options = [
        Option('-c', '--concurrency', default=conf.CELERYD_CONCURRENCY,
               type='int'),
        Option('-P', '--pool', default=conf.CELERYD_POOL, dest='pool_cls'),
        Option('--purge', '--discard', default=False, action='store_true'),
        Option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL),
        Option('-n', '--hostname'),
        Option('-B', '--beat', action='store_true'),
        Option('-s', '--schedule', dest='schedule_filename',
               default=conf.CELERYBEAT_SCHEDULE_FILENAME),
        Option('--scheduler', dest='scheduler_cls'),
        Option('-S', '--statedb', default=conf.CELERYD_STATE_DB,
               dest='state_db'),
        Option('-E', '--events', default=conf.CELERY_SEND_EVENTS,
               action='store_true', dest='send_events'),
        Option('--time-limit', type='float', dest='task_time_limit',
               default=conf.CELERYD_TASK_TIME_LIMIT),
        Option('--soft-time-limit', dest='task_soft_time_limit',
               default=conf.CELERYD_TASK_SOFT_TIME_LIMIT, type='float'),
        Option('--maxtasksperchild', dest='max_tasks_per_child',
               default=conf.CELERYD_MAX_TASKS_PER_CHILD, type='int'),
        Option('--queues', '-Q', default=[]),
        Option('--exclude-queues', '-X', default=[]),
        Option('--include', '-I', default=[]),
        Option('--autoscale'),
        Option('--autoreload', action='store_true'),
        Option('--no-execv', action='store_true', default=False),
        Option('--without-gossip', action='store_true', default=False),
        Option('--without-mingle', action='store_true', default=False),
        Option('--without-heartbeat', action='store_true', default=False),
        Option('-O', dest='optimization'),
        Option('-D', '--detach', action='store_true'),
    ]
    return (tuple(options) + daemon_options()
            + tuple(self.app.user_options['worker']))
def get_options(self):
    """Worker options (new-style config names) + daemon + user options."""
    conf = self.app.conf
    options = [
        Option('-c', '--concurrency', default=conf.worker_concurrency,
               type='int'),
        Option('-P', '--pool', default=conf.worker_pool, dest='pool_cls'),
        Option('--purge', '--discard', default=False, action='store_true'),
        Option('-l', '--loglevel', default='WARN'),
        Option('-n', '--hostname'),
        Option('-B', '--beat', action='store_true'),
        Option('-s', '--schedule', dest='schedule_filename',
               default=conf.beat_schedule_filename),
        Option('--scheduler', dest='scheduler_cls'),
        Option('-S', '--statedb', default=conf.worker_state_db,
               dest='state_db'),
        Option('-E', '--events', default=conf.worker_send_events,
               action='store_true', dest='send_events'),
        Option('--time-limit', type='float', dest='task_time_limit',
               default=conf.task_time_limit),
        Option('--soft-time-limit', dest='task_soft_time_limit',
               default=conf.task_soft_time_limit, type='float'),
        Option('--maxtasksperchild', dest='max_tasks_per_child',
               default=conf.worker_max_tasks_per_child, type='int'),
        Option('--prefetch-multiplier', dest='prefetch_multiplier',
               default=conf.worker_prefetch_multiplier, type='int'),
        Option('--maxmemperchild', dest='max_memory_per_child',
               default=conf.worker_max_memory_per_child, type='int'),
        Option('--queues', '-Q', default=[]),
        Option('--exclude-queues', '-X', default=[]),
        Option('--include', '-I', default=[]),
        Option('--autoscale'),
        Option('--autoreload', action='store_true'),
        Option('--no-execv', action='store_true', default=False),
        Option('--without-gossip', action='store_true', default=False),
        Option('--without-mingle', action='store_true', default=False),
        Option('--without-heartbeat', action='store_true', default=False),
        Option('--heartbeat-interval', type='int'),
        Option('-O', dest='optimization'),
        Option('-D', '--detach', action='store_true'),
    ]
    return (tuple(options) + daemon_options()
            + tuple(self.app.user_options['worker']))
def add_arguments(self, parser):
    """Register beat options, daemon options and any app user options."""
    conf = self.app.conf
    beat = parser.add_argument_group("Beat Options")
    beat.add_argument("--detach", action="store_true", default=False)
    beat.add_argument("-s", "--schedule",
                      default=conf.beat_schedule_filename)
    beat.add_argument("--max-interval", type=float)
    beat.add_argument("-S", "--scheduler", default=conf.beat_scheduler)
    beat.add_argument("-l", "--loglevel", default="WARN")
    daemon_options(parser, default_pidfile="celerybeat.pid")
    extra = self.app.user_options["beat"]
    if extra:
        self.add_compat_options(
            parser.add_argument_group("User Options"), extra)
def get_options(self):
    """Return event-command options followed by the daemon option set."""
    opts = (
        Option('-d', '--dump', action='store_true'),
        Option('-c', '--camera'),
        Option('--detach', action='store_true'),
        Option('-F', '--frequency', '--freq', type='float', default=1.0),
        Option('-r', '--maxrate'),
        Option('-l', '--loglevel', default='INFO'),
    )
    return opts + daemon_options(default_pidfile='celeryev.pid')
def get_options(self):
    """Beat options followed by the shared daemon options."""
    conf = self.app.conf  # renamed local (was ``c``)
    beat_opts = (
        Option('--detach', action='store_true'),
        Option('-s', '--schedule',
               default=conf.CELERYBEAT_SCHEDULE_FILENAME),
        Option('--max-interval', type='float'),
        Option('-S', '--scheduler', dest='scheduler_cls'),
        Option('-l', '--loglevel', default=conf.CELERYBEAT_LOG_LEVEL),
    )
    return beat_opts + daemon_options(default_pidfile='celerybeat.pid')
def add_arguments(self, parser):
    """Register dumper, snapshot, daemon and app user options."""
    dumper = parser.add_argument_group('Dumper')
    dumper.add_argument('-d', '--dump', action='store_true', default=False)
    snapshot = parser.add_argument_group('Snapshot')
    snapshot.add_argument('-c', '--camera')
    snapshot.add_argument('--detach', action='store_true', default=False)
    snapshot.add_argument('-F', '--frequency', '--freq',
                          type=float, default=1.0)
    snapshot.add_argument('-r', '--maxrate')
    snapshot.add_argument('-l', '--loglevel', default='INFO')
    daemon_options(parser, default_pidfile='celeryev.pid')
    extra = self.app.user_options['events']
    if extra:
        self.add_compat_options(
            parser.add_argument_group('User Options'), extra)
def get_options(self):
    """Beat options, daemon options and app user options as one tuple."""
    conf = self.app.conf
    own = (
        Option('--detach', action='store_true'),
        Option('-s', '--schedule', default=conf.beat_schedule_filename),
        Option('--max-interval', type='float'),
        Option('-S', '--scheduler', dest='scheduler_cls'),
        Option('-l', '--loglevel', default='WARN'),
    )
    return (own
            + daemon_options(default_pidfile='celerybeat.pid')
            + tuple(self.app.user_options['beat']))
def get_options(self):
    """Return beat options plus the shared daemon options."""
    conf = self.app.conf
    options = [
        Option("--detach", action="store_true"),
        Option("-s", "--schedule",
               default=conf.CELERYBEAT_SCHEDULE_FILENAME),
        Option("--max-interval", type="float"),
        Option("-S", "--scheduler", dest="scheduler_cls"),
        Option("-l", "--loglevel", default=conf.CELERYBEAT_LOG_LEVEL),
    ]
    return tuple(options) + daemon_options(default_pidfile="celerybeat.pid")
class MonitorCommand(Command):
    """Command-line entry point for the ``celerymon`` monitor service."""

    namespace = 'celerymon'
    enable_config_from_cmdline = True
    preload_options = Command.preload_options + daemon_options('celerymon.pid')
    version = __version__

    def run(self, loglevel='ERROR', logfile=None, http_port=8989,
            http_address='', app=None, detach=False, pidfile=None,
            uid=None, gid=None, umask=None, working_directory=None,
            **kwargs):
        """Print startup info and set up the monitor service runner.

        ``loglevel`` may be a name or an int; names are resolved via
        ``LOG_LEVELS``.
        """
        print('celerymon %s is starting.' % self.version)
        app = self.app
        workdir = working_directory

        # Setup logging
        if not isinstance(loglevel, int):
            loglevel = LOG_LEVELS[loglevel.upper()]

        # Dump configuration to screen so we have some basic information
        # when users sends e-mails.
        print(
            STARTUP_INFO_FMT % {
                'http_port': http_port,
                'http_address': http_address or 'localhost',
                'conninfo': app.broker_connection().as_uri(),
            })
        print('celerymon has started.')
        set_process_title('celerymon', info=strargv(sys.argv))

        def _run_monitor():
            create_pidlock(pidfile)
            app.log.setup_logging_subsystem(loglevel=loglevel,
                                            logfile=logfile)
            logger = app.log.get_default_logger(name='celery.mon')
            monitor = MonitorService(logger=logger,
                                     http_port=http_port,
                                     http_address=http_address)
            try:
                monitor.start()
            # Fixed: was Python 2-only ``except Exception, exc`` syntax;
            # ``as`` works on Python 2.6+ and 3.x.
            except Exception as exc:
                logger.error('celerymon raised exception %r',
                             exc, exc_info=True)
            except KeyboardInterrupt:
                pass
        # NOTE(review): _run_monitor is defined but not invoked in the
        # visible code — presumably called by detach handling elsewhere;
        # confirm against the full module.
def add_arguments(self, parser):
    """Register dumper/snapshot groups, daemon options and user options."""
    dump_group = parser.add_argument_group("Dumper")
    dump_group.add_argument("-d", "--dump",
                            action="store_true", default=False)
    cam_group = parser.add_argument_group("Snapshot")
    cam_group.add_argument("-c", "--camera")
    cam_group.add_argument("--detach", action="store_true", default=False)
    cam_group.add_argument("-F", "--frequency", "--freq",
                           type=float, default=1.0)
    cam_group.add_argument("-r", "--maxrate")
    cam_group.add_argument("-l", "--loglevel", default="INFO")
    daemon_options(parser, default_pidfile="celeryev.pid")
    extra = self.app.user_options["events"]
    if extra:
        self.add_compat_options(
            parser.add_argument_group("User Options"), extra)
def get_options(self):
    """Worker options (new config names) + daemon + app user options."""
    conf = self.app.conf
    options = [
        Option("-c", "--concurrency", default=conf.worker_concurrency,
               type="int"),
        Option("-P", "--pool", default=conf.worker_pool, dest="pool_cls"),
        Option("--purge", "--discard", default=False, action="store_true"),
        Option("-l", "--loglevel", default="WARN"),
        Option("-n", "--hostname"),
        Option("-B", "--beat", action="store_true"),
        Option("-s", "--schedule", dest="schedule_filename",
               default=conf.beat_schedule_filename),
        Option("--scheduler", dest="scheduler_cls"),
        Option("-S", "--statedb", default=conf.worker_state_db,
               dest="state_db"),
        Option("-E", "--events", default=conf.worker_send_task_events,
               action="store_true", dest="send_events"),
        Option("--time-limit", type="float", dest="task_time_limit",
               default=conf.task_time_limit),
        Option("--soft-time-limit", dest="task_soft_time_limit",
               default=conf.task_soft_time_limit, type="float"),
        Option("--maxtasksperchild", dest="max_tasks_per_child",
               default=conf.worker_max_tasks_per_child, type="int"),
        Option("--prefetch-multiplier", dest="prefetch_multiplier",
               default=conf.worker_prefetch_multiplier, type="int"),
        Option("--maxmemperchild", dest="max_memory_per_child",
               default=conf.worker_max_memory_per_child, type="int"),
        Option("--queues", "-Q", default=[]),
        Option("--exclude-queues", "-X", default=[]),
        Option("--include", "-I", default=[]),
        Option("--autoscale"),
        Option("--autoreload", action="store_true"),
        Option("--no-execv", action="store_true", default=False),
        Option("--without-gossip", action="store_true", default=False),
        Option("--without-mingle", action="store_true", default=False),
        Option("--without-heartbeat", action="store_true", default=False),
        Option("--heartbeat-interval", type="int"),
        Option("-O", dest="optimization"),
        Option("-D", "--detach", action="store_true"),
    ]
    return (tuple(options) + daemon_options()
            + tuple(self.app.user_options["worker"]))
def get_options(self):
    """Beat options + daemon options + app user options, as one tuple."""
    conf = self.app.conf
    beat_opts = (
        Option('--detach', action='store_true'),
        Option('-s', '--schedule', default=conf.beat_schedule_filename),
        Option('--max-interval', type='float'),
        Option('-S', '--scheduler', dest='scheduler_cls'),
        Option('-l', '--loglevel', default='WARN'),
    )
    daemon_opts = daemon_options(default_pidfile='celerybeat.pid')
    return beat_opts + daemon_opts + tuple(self.app.user_options['beat'])
class BeatCommand(Command):
    """Start the celerybeat periodic-task scheduler, optionally detached."""

    # Allow configuration overrides on the command line.
    enable_config_from_cmdline = True
    # This command takes no positional arguments.
    supports_args = False
    preload_options = (Command.preload_options
                       + daemon_options(default_pidfile="celerybeat.pid"))

    def run(self, detach=False, logfile=None, pidfile=None, uid=None,
            gid=None, umask=None, working_directory=None, **kwargs):
        """Run beat; with ``detach`` fork to the background first."""
        workdir = working_directory
        # ``app`` is consumed by the base class; don't forward it to Beat.
        kwargs.pop("app", None)
        beat = partial(self.app.Beat,
                       logfile=logfile, pidfile=pidfile, **kwargs)
        if detach:
            with detached(logfile, pidfile, uid, gid, umask, workdir):
                return beat().run()
        else:
            return beat().run()

    def prepare_preload_options(self, options):
        """Change to --workdir before anything else runs."""
        workdir = options.get("working_directory")
        if workdir:
            os.chdir(workdir)

    def get_options(self):
        """Return the beat-specific command-line options."""
        conf = self.app.conf
        return (
            Option('--detach',
                   default=False, action="store_true", dest="detach",
                   help="Detach and run in the background."),
            Option('-s', '--schedule',
                   default=conf.CELERYBEAT_SCHEDULE_FILENAME,
                   action="store", dest="schedule",
                   help="Path to the schedule database. The extension "
                        "'.db' will be appended to the filename. Default: %s" % (
                            conf.CELERYBEAT_SCHEDULE_FILENAME, )),
            Option('--max-interval',
                   default=None, type="float", dest="max_interval",
                   help="Max. seconds to sleep between schedule iterations."),
            Option('-S', '--scheduler',
                   default=None, action="store", dest="scheduler_cls",
                   help="Scheduler class. Default is "
                        "celery.beat:PersistentScheduler"),
            Option('-l', '--loglevel',
                   default=conf.CELERYBEAT_LOG_LEVEL,
                   action="store", dest="loglevel",
                   help="Loglevel. One of DEBUG/INFO/WARNING/ERROR/CRITICAL."))
def get_options(self):
    """Return monitor/webserver options plus the daemon option set."""
    conf = self.app.conf
    own = (
        Option('-l', '--loglevel', default=conf.CELERYMON_LOG_LEVEL,
               help='Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL.'),
        Option('-P', '--port', type='int', dest='http_port', default=8989,
               help='Port the webserver should listen to.'),
        Option('-B', '--bind', dest='http_address', default='',
               help='Address webserver should listen to. Default (any).'),
        Option('-D', '--detach', action='store_true',
               help='Run as daemon.'),
    )
    return own + daemon_options('celerymon.pid')
class MonitorCommand(Command):
    """Command-line entry point for the ``celery_stalker`` monitor."""

    namespace = 'celery_stalker'
    enable_config_from_cmdline = True
    preload_options = Command.preload_options + daemon_options('celery_stalker.pid')

    def run(self, app=None, detach=False, pidfile=None, uid=None,
            gid=None, umask=None, working_directory=None, **kwargs):
        """Print startup info and set up the monitor service runner."""
        print('celery_stalker is starting.')
        app = self.app
        workdir = working_directory
        print(STARTUP_INFO_FMT % {
            'conninfo': app.broker_connection().as_uri(),
        })
        print('celery_stalker has started.')
        set_process_title('celery_stalker', info=strargv(sys.argv))
        logfile = 'celery_stalker.log'

        def _run_monitor():
            create_pidlock(pidfile)
            # File logging at INFO level for the monitor process.
            logger = logging.getLogger('celery_stalker')
            log_handler = logging.FileHandler(logfile)
            formatter = logging.Formatter(
                '%(asctime)s %(levelname)s %(message)s')
            log_handler.setFormatter(formatter)
            logger.addHandler(log_handler)
            logger.setLevel(logging.INFO)
            monitor = MonitorService(logger=logger)
            try:
                monitor.start()
            # Fixed: was Python 2-only ``except Exception, exc`` syntax;
            # ``as`` works on Python 2.6+ and 3.x.
            except Exception as exc:
                logger.error('celery_stalker raised exception %r',
                             exc, exc_info=True)
            except KeyboardInterrupt:
                pass
        # NOTE(review): _run_monitor is defined but not invoked in the
        # visible code — presumably called by detach handling elsewhere;
        # confirm against the full module.
class BeatCommand(Command):
    """Start the celerybeat periodic-task scheduler, optionally detached."""

    # Reuse the module docstring as the command's documentation.
    doc = __doc__
    enable_config_from_cmdline = True
    # This command takes no positional arguments.
    supports_args = False
    preload_options = (Command.preload_options
                       + daemon_options(default_pidfile="celerybeat.pid"))

    def run(self, detach=False, logfile=None, pidfile=None, uid=None,
            gid=None, umask=None, working_directory=None, **kwargs):
        """Run beat; with ``detach`` fork to the background first."""
        workdir = working_directory
        # ``app`` is consumed by the base class; don't forward it to Beat.
        kwargs.pop("app", None)
        beat = partial(self.app.Beat,
                       logfile=logfile, pidfile=pidfile, **kwargs)
        if detach:
            with detached(logfile, pidfile, uid, gid, umask, workdir):
                return beat().run()
        else:
            return beat().run()

    def prepare_preload_options(self, options):
        """Change to --workdir before anything else runs."""
        workdir = options.get("working_directory")
        if workdir:
            os.chdir(workdir)

    def get_options(self):
        """Return the beat-specific command-line options."""
        c = self.app.conf
        return (
            Option('--detach', action="store_true"),
            Option('-s', '--schedule',
                   default=c.CELERYBEAT_SCHEDULE_FILENAME),
            Option('--max-interval', type="float"),
            Option('-S', '--scheduler', dest="scheduler_cls"),
            Option('-l', '--loglevel', default=c.CELERYBEAT_LOG_LEVEL),
        )
class MonitorCommand(Command):
    """Command-line entry point for the ``celerymon`` monitor service."""

    namespace = "celerymon"
    enable_config_from_cmdline = True
    preload_options = Command.preload_options + daemon_options("celerymon.pid")
    version = __version__

    def run(self, loglevel="ERROR", logfile=None, http_port=8989,
            http_address='', app=None, detach=False, pidfile=None,
            uid=None, gid=None, umask=None, working_directory=None,
            **kwargs):
        """Print startup info and set up the monitor service runner."""
        print("celerymon %s is starting." % (self.version, ))
        app = self.app
        workdir = working_directory

        # Setup logging
        if not isinstance(loglevel, int):
            loglevel = LOG_LEVELS[loglevel.upper()]

        # Dump configuration to screen so we have some basic information
        # when users sends e-mails.
        print(STARTUP_INFO_FMT % {
            "http_port": http_port,
            "http_address": http_address or "localhost",
            "conninfo": app.broker_connection().as_uri(),
        })
        print("celerymon has started.")
        set_process_title("celerymon", info=strargv(sys.argv))

        def _run_monitor():
            app.log.setup_logging_subsystem(loglevel=loglevel,
                                            logfile=logfile)
            logger = app.log.get_default_logger(name="celery.mon")
            monitor = MonitorService(logger=logger,
                                     http_port=http_port,
                                     http_address=http_address)
            try:
                monitor.start()
            # Fixed: was Python 2-only ``except Exception, exc`` syntax;
            # ``as`` works on Python 2.6+ and 3.x.
            except Exception as exc:
                logger.error("celerymon raised exception %r\n%s" % (
                    exc, traceback.format_exc()))
            except KeyboardInterrupt:
                pass
        # NOTE(review): _run_monitor is defined but not invoked in the
        # visible code — presumably called by detach handling elsewhere;
        # confirm against the full module.
def get_options(self):
    """Worker options (old config names) + daemon + app user options."""
    conf = self.app.conf
    options = [
        Option("-c", "--concurrency", default=conf.CELERYD_CONCURRENCY,
               type="int"),
        Option("-P", "--pool", default=conf.CELERYD_POOL, dest="pool_cls"),
        Option("--purge", "--discard", default=False, action="store_true"),
        Option("-l", "--loglevel", default=conf.CELERYD_LOG_LEVEL),
        Option("-n", "--hostname"),
        Option("-B", "--beat", action="store_true"),
        Option("-s", "--schedule", dest="schedule_filename",
               default=conf.CELERYBEAT_SCHEDULE_FILENAME),
        Option("--scheduler", dest="scheduler_cls"),
        Option("-S", "--statedb", default=conf.CELERYD_STATE_DB,
               dest="state_db"),
        Option("-E", "--events", default=conf.CELERY_SEND_EVENTS,
               action="store_true", dest="send_events"),
        Option("--time-limit", type="float", dest="task_time_limit",
               default=conf.CELERYD_TASK_TIME_LIMIT),
        Option("--soft-time-limit", dest="task_soft_time_limit",
               default=conf.CELERYD_TASK_SOFT_TIME_LIMIT, type="float"),
        Option("--maxtasksperchild", dest="max_tasks_per_child",
               default=conf.CELERYD_MAX_TASKS_PER_CHILD, type="int"),
        Option("--queues", "-Q", default=[]),
        Option("--exclude-queues", "-X", default=[]),
        Option("--include", "-I", default=[]),
        Option("--autoscale"),
        Option("--autoreload", action="store_true"),
        Option("--no-execv", action="store_true", default=False),
        Option("--without-gossip", action="store_true", default=False),
        Option("--without-mingle", action="store_true", default=False),
        Option("--without-heartbeat", action="store_true", default=False),
        Option("-O", dest="optimization"),
        Option("-D", "--detach", action="store_true"),
    ]
    return (tuple(options) + daemon_options()
            + tuple(self.app.user_options["worker"]))
def get_options(self):
    """Event-capture options followed by the daemon option set."""
    options = [
        Option('-d', '--dump', action="store_true", dest="dump",
               help="Dump events to stdout."),
        Option('-c', '--camera', action="store", dest="camera",
               help="Camera class to take event snapshots with."),
        Option('--detach', default=False, action="store_true",
               dest="detach",
               help="Recording: Detach and run in the background."),
        Option('-F', '--frequency', '--freq', action="store",
               dest="frequency", type="float", default=1.0,
               help="Recording: Snapshot frequency."),
        Option('-r', '--maxrate', action="store", dest="maxrate",
               default=None,
               help="Recording: Shutter rate limit (e.g. 10/m)"),
        Option('-l', '--loglevel', action="store", dest="loglevel",
               default="INFO", help="Loglevel. Default is WARNING."),
    ]
    return tuple(options) + daemon_options(default_pidfile="celeryev.pid",
                                           default_logfile=None)
import os import sys from optparse import OptionParser, BadOptionError from celery import __version__ from celery.bin.base import daemon_options from celery.platforms import create_daemon_context OPTION_LIST = daemon_options(default_pidfile="celeryd.pid") class detached(object): def __init__(self, path, argv, logfile=None, pidfile=None, uid=None, gid=None, umask=0, working_directory=None): self.path = path self.argv = argv self.logfile = logfile self.pidfile = pidfile self.uid = uid self.gid = gid self.umask = umask self.working_directory = working_directory def start(self): context, on_stop = create_daemon_context( logfile=self.logfile, pidfile=self.pidfile, uid=self.uid, gid=self.gid, umask=self.umask, working_directory=self.working_directory,
class EvCommand(Command):
    """``celery events``: dump events, run the curses monitor, or start a
    snapshot camera — optionally detached."""

    # This command takes no positional arguments.
    supports_args = False
    preload_options = (Command.preload_options
                       + daemon_options(default_pidfile="celeryev.pid"))

    def run(self, dump=False, camera=None, frequency=1.0, maxrate=None,
            loglevel="INFO", logfile=None, prog_name="celeryev",
            pidfile=None, uid=None, gid=None, umask=None,
            working_directory=None, detach=False, **kwargs):
        """Dispatch to dump/camera/top mode based on the flags given."""
        self.prog_name = prog_name
        if dump:
            return self.run_evdump()
        if camera:
            return self.run_evcam(camera, freq=frequency, maxrate=maxrate,
                                  loglevel=loglevel, logfile=logfile,
                                  pidfile=pidfile, uid=uid, gid=gid,
                                  umask=umask,
                                  working_directory=working_directory,
                                  detach=detach)
        # Default mode: the curses top-like monitor.
        return self.run_evtop()

    def prepare_preload_options(self, options):
        """Change to --workdir before anything else runs."""
        workdir = options.get("working_directory")
        if workdir:
            os.chdir(workdir)

    def run_evdump(self):
        """Dump events to stdout."""
        from celery.events.dumper import evdump
        self.set_process_status("dump")
        return evdump(app=self.app)

    def run_evtop(self):
        """Start the curses event monitor."""
        from celery.events.cursesmon import evtop
        self.set_process_status("top")
        return evtop(app=self.app)

    def run_evcam(self, camera, logfile=None, pidfile=None, uid=None,
                  gid=None, umask=None, working_directory=None,
                  detach=False, **kwargs):
        """Start the snapshot camera; with ``detach`` fork first."""
        from celery.events.snapshot import evcam
        workdir = working_directory
        self.set_process_status("cam")
        kwargs["app"] = self.app
        cam = partial(evcam, camera,
                      logfile=logfile, pidfile=pidfile, **kwargs)
        if detach:
            with detached(logfile, pidfile, uid, gid, umask, workdir):
                return cam()
        else:
            return cam()

    def set_process_status(self, prog, info=""):
        """Update the process title to show mode and argv."""
        prog = "%s:%s" % (self.prog_name, prog)
        info = "%s %s" % (info, strargv(sys.argv))
        return set_process_title(prog, info=info)

    def get_options(self):
        """Return the events-specific command-line options."""
        return (Option('-d', '--dump', action="store_true", dest="dump",
                       help="Dump events to stdout."),
                Option('-c', '--camera', action="store", dest="camera",
                       help="Camera class to take event snapshots with."),
                Option('--detach', default=False, action="store_true",
                       dest="detach",
                       help="Recording: Detach and run in the background."),
                Option('-F', '--frequency', '--freq', action="store",
                       dest="frequency", type="float", default=1.0,
                       help="Recording: Snapshot frequency."),
                Option('-r', '--maxrate', action="store", dest="maxrate",
                       default=None,
                       help="Recording: Shutter rate limit (e.g. 10/m)"),
                Option('-l', '--loglevel', action="store", dest="loglevel",
                       default="INFO", help="Loglevel. Default is WARNING."))
from __future__ import with_statement import os import sys from optparse import OptionParser, BadOptionError from celery import __version__ from celery.platforms import EX_FAILURE, detached from celery.utils.log import get_logger from celery.bin.base import daemon_options, Option logger = get_logger(__name__) OPTION_LIST = daemon_options(default_pidfile="celeryd.pid") + (Option( "--fake", default=False, action="store_true", dest="fake", help="Don't fork (for debugging purposes)"), ) def detach( path, argv, logfile=None, pidfile=None, uid=None, gid=None, umask=0,
class Command(CymeCommand):
    """``cyme-branch``: start a cyme branch service."""

    branch_cls = 'cyme.branch.Branch'
    # Fixed: was 'b ranch.log' (stray space in the filename); 'branch.log'
    # matches the sibling default 'branch.pid'.
    default_detach_logfile = 'branch.log'
    default_detach_pidfile = 'branch.pid'
    name = 'cyme-branch'
    args = '[optional port number, or ipaddr:port]'
    help = 'Starts a cyme branch'
    option_list = tuple(CymeCommand().option_list) + (
        Option('--broker', '-b',
               default=None, action='store', dest='broker',
               help="""Broker URL to use for the cyme message bus.\
Default is amqp://guest:guest@localhost:5672//"""),
        Option('--detach',
               default=False, action='store_true', dest='detach',
               help='Detach and run in the background.'),
        Option('-i', '--id',
               default=None, action='store', dest='id',
               help='Set explicit branch id.'),
        Option('-X', '--no-interaction',
               default=False, action='store_true', dest='no_interaction',
               help="Don't ask questions"),
        Option('--without-httpd',
               default=False, action='store_true', dest='without_httpd',
               help='Disable HTTP server'),
        Option('-l', '--loglevel',
               default='WARNING', action='store', dest='loglevel',
               help='Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL'),
        Option('-D', '--instance-dir',
               default=None, action='store', dest='instance_dir',
               help='Custom instance dir. Default is instances/'),
        Option('-C', '--numc',
               default=2, action='store', type='int', dest='numc',
               help='Number of controllers to start. Default is 2'),
        Option('--sup-interval',
               default=60, action='store', type='int', dest='sup_interval',
               help='Supervisor schedule interval. Default is every minute.'),
    ) + daemon_options(default_detach_pidfile)

    # Progress bars shown during startup/shutdown (None when inactive).
    _startup_pbar = None
    _shutdown_pbar = None

    def handle(self, *args, **kwargs):
        """Entry point: prepare options, build the branch and start it."""
        kwargs = self.prepare_options(**kwargs)
        self.loglevel = kwargs.get('loglevel')
        self.logfile = kwargs.get('logfile')
        self.enter_instance_dir()
        self.env.syncdb(interactive=False)
        self.install_cry_handler()
        self.install_rdb_handler()
        self.colored = celery.log.colored(kwargs.get('logfile'))
        self.branch = instantiate(self.branch_cls, *args,
                                  colored=self.colored, **kwargs)
        self.connect_signals()
        print(str(self.colored.cyan(self.banner())))
        self.detached = kwargs.get('detach', False)
        return (self._detach if self.detached else self._start)(**kwargs)

    def setup_default_env(self, env):
        """Enable eventlet and the pool limit for this process."""
        env.setup_eventlet()
        env.setup_pool_limit()

    def stop(self):
        self.set_process_title('shutdown...')

    def on_branch_ready(self, sender=None, **kwargs):
        """Signal handler: finish the startup bar and announce readiness."""
        if self._startup_pbar:
            self._startup_pbar.finish()
            self._startup_pbar = None
        pid = os.getpid()
        self.set_process_title('ready')
        if not self.detached and \
                not self.branch.is_enabled_for('INFO'):
            print('(%s) branch ready' % (pid, ))
        sender.info('[READY] (%s)' % (pid, ))

    def on_branch_shutdown(self, sender=None, **kwargs):
        """Signal handler: finish the shutdown progress bar."""
        if self._shutdown_pbar:
            self._shutdown_pbar.finish()
            self._shutdown_pbar = None

    def _detach(self, logfile=None, pidfile=None, uid=None, gid=None,
                umask=None, working_directory=None, **kwargs):
        """Fork to the background, then start the branch."""
        print('detaching... [pidfile=%s logfile=%s]' % (pidfile, logfile))
        with detached(logfile, pidfile, uid, gid, umask, working_directory):
            return self._start(pidfile=pidfile)

    def _start(self, pidfile=None, **kwargs):
        """Start the branch in this process; blocks until shutdown."""
        self.setup_logging(logfile=self.logfile, loglevel=self.loglevel)
        self.set_process_title('boot')
        self.install_signal_handlers()
        if pidfile:
            pidlock = create_pidlock(pidfile).acquire()
            atexit.register(pidlock.release)
        try:
            return self.branch.start().wait()
        except SystemExit:
            self.branch.stop()

    def banner(self):
        """Render the startup banner from the branch's current state."""
        branch = self.branch
        addr, port = branch.addrport
        con = branch.controllers
        try:
            pres_interval = con[0].thread.presence.interval
        except AttributeError:
            # Presence thread disabled/absent.
            pres_interval = '(disabled)'
        sup = branch.supervisor.thread
        return BANNER % {
            'id': branch.id,
            'version': self.__version__,
            'broker': branch.connection.as_uri(),
            'loglevel': self.LOG_LEVELS[branch.loglevel],
            'logfile': branch.logfile or '[stderr]',
            'addr': addr or 'localhost',
            'port': port or 8000,
            'sup.interval': sup.interval,
            'presence.interval': pres_interval,
            'controllers': len(con),
            'instance_dir': self.instance_dir}

    def install_signal_handlers(self):
        """Translate TERM/INT into SystemExit so cleanup runs."""

        def raise_SystemExit(signum, frame):
            raise SystemExit()

        for signal in ('TERM', 'INT'):
            signals[signal] = raise_SystemExit

    def set_process_title(self, info):
        set_process_title('%s#%s' % (self.name, shortuuid(self.branch.id)),
                          '%s (-D %s)' % (info, self.instance_dir))

    def repr_controller_id(self, c):
        return shortuuid(c) + c[-2:]

    def connect_signals(self):
        """Wire progress/readiness handlers to the branch signals."""
        sigs = self.signals
        sigmap = {
            sigs.branch_startup_request: (self.setup_startup_progress,
                                          self.setup_shutdown_progress),
            sigs.branch_ready: (self.on_branch_ready, ),
            sigs.branch_shutdown_complete: (self.on_branch_shutdown, )}
        for sig, handlers in sigmap.iteritems():
            for handler in handlers:
                sig.connect(handler, sender=self.branch)

    def setup_shutdown_progress(self, sender=None, **kwargs):
        """Install a progress bar stepping on shutdown-related signals."""
        from cyme.utils import LazyProgressBar
        if sender.is_enabled_for('DEBUG'):
            return
        c = self.colored
        sigs = (self.signals.thread_pre_shutdown,
                self.signals.thread_pre_join,
                self.signals.thread_post_join,
                self.signals.thread_post_shutdown)
        estimate = (len(sigs) * ((len(sender.components) + 1) * 2)
                    + sum(c.thread.extra_shutdown_steps
                          for c in sender.components))
        text = c.white('Shutdown...').embed()
        p = self._shutdown_pbar = LazyProgressBar(estimate, text,
                                                  c.reset().embed())
        [sig.connect(p.step) for sig in sigs]

    def setup_startup_progress(self, sender=None, **kwargs):
        """Install a progress bar stepping on startup-related signals."""
        from cyme.utils import LazyProgressBar
        if sender.is_enabled_for('INFO'):
            return
        c = self.colored
        tsigs = (self.signals.thread_pre_start,
                 self.signals.thread_post_start)
        osigs = (self.signals.httpd_ready,
                 self.signals.supervisor_ready,
                 self.signals.controller_ready,
                 self.signals.branch_ready)
        estimate = (len(tsigs) + ((len(sender.components) + 10) * 2)
                    + len(osigs))
        text = c.white('Startup...').embed()
        p = self._startup_pbar = LazyProgressBar(estimate, text,
                                                 c.reset().embed())
        [sig.connect(p.step) for sig in tsigs + osigs]

    @cached_property
    def signals(self):
        return import_module('cyme.branch.signals')
from optparse import OptionParser, BadOptionError from celery.platforms import EX_FAILURE, detached from celery.utils import default_nodename, node_format from celery.utils.log import get_logger from celery.bin.base import daemon_options, Option __all__ = ['detached_celeryd', 'detach'] logger = get_logger(__name__) C_FAKEFORK = os.environ.get('C_FAKEFORK') OPTION_LIST = daemon_options(default_pidfile='celeryd.pid') + ( Option('--workdir', default=None, dest='working_directory'), Option('-n', '--hostname'), Option('--fake', default=False, action='store_true', dest='fake', help="Don't fork (for debugging purposes)"), ) def detach(path, argv, logfile=None, pidfile=None, uid=None,
def prepare_arguments(self, parser):
    """Attach every ``celery worker`` option to *parser* (optparse).

    Options are registered in named groups (worker / pool / queue /
    features / daemon / embedded beat / user) so that ``--help``
    output stays organised.
    """
    c = self.app.conf

    worker = OptionGroup(parser, 'Worker Options')
    worker.add_option('-n', '--hostname')
    worker.add_option('-D', '--detach', action='store_true')
    worker.add_option('-S', '--statedb', default=c.worker_state_db)
    worker.add_option('-l', '--loglevel', default='WARN')
    worker.add_option('-O', dest='optimization')
    worker.add_option('--prefetch-multiplier', type='int',
                      default=c.worker_prefetch_multiplier)
    parser.add_option_group(worker)

    pool = OptionGroup(parser, 'Pool Options')
    pool.add_option('-c', '--concurrency',
                    default=c.worker_concurrency, type='int')
    pool.add_option('-P', '--pool', default=c.worker_pool)
    pool.add_option('-E', '--task-events', '--events',
                    action='store_true',
                    default=c.worker_send_task_events)
    pool.add_option('--time-limit', type='float',
                    default=c.task_time_limit)
    pool.add_option('--soft-time-limit', type='float',
                    default=c.task_soft_time_limit)
    pool.add_option('--max-tasks-per-child', '--maxtasksperchild',
                    type='int', default=c.worker_max_tasks_per_child)
    pool.add_option('--max-memory-per-child', '--maxmemperchild',
                    type='int', default=c.worker_max_memory_per_child)
    parser.add_option_group(pool)

    queues = OptionGroup(parser, 'Queue Options')
    queues.add_option('--purge', '--discard', default=False,
                      action='store_true')
    # NOTE(review): the shared ``default=[]`` lists are kept as-is to
    # preserve behaviour — confirm upstream whether they should be
    # per-parse copies.
    queues.add_option('--queues', '-Q', default=[])
    queues.add_option('--exclude-queues', '-X', default=[])
    queues.add_option('--include', '-I', default=[])
    parser.add_option_group(queues)

    features = OptionGroup(parser, 'Features')
    features.add_option('--without-gossip', action='store_true',
                        default=False)
    features.add_option('--without-mingle', action='store_true',
                        default=False)
    features.add_option('--without-heartbeat', action='store_true',
                        default=False)
    features.add_option('--heartbeat-interval', type='int')
    parser.add_option_group(features)

    # Daemonization options (--pidfile, --uid, ...) are shared helpers.
    daemon_options(parser)

    beat = OptionGroup(parser, 'Embedded Beat Options')
    beat.add_option('-B', '--beat', action='store_true')
    beat.add_option('-s', '--schedule-filename', '--schedule',
                    default=c.beat_schedule_filename)
    beat.add_option('--scheduler')
    parser.add_option_group(beat)

    user_options = self.app.user_options['worker']
    if user_options:
        extra = OptionGroup(parser, 'User Options')
        extra.option_list.extend(user_options)
        parser.add_option_group(extra)
def prepare_arguments(self, parser):
    """Attach every ``celery worker`` option to *parser* (optparse).

    Options are registered in named groups so ``--help`` output stays
    organised; daemonization options come from ``daemon_options``.

    Fix: user options were appended to ``uopts.options_list`` — the
    optparse ``OptionGroup`` attribute is ``option_list``, so any app
    that declared ``user_options['worker']`` crashed with
    ``AttributeError``.
    """
    conf = self.app.conf

    wopts = OptionGroup(parser, "Worker Options")
    wopts.add_option("-n", "--hostname")
    wopts.add_option("-D", "--detach", action="store_true")
    wopts.add_option("-S", "--statedb",
                     default=conf.worker_state_db, dest="state_db")
    wopts.add_option("-l", "--loglevel", default="WARN")
    wopts.add_option("-O", dest="optimization")
    wopts.add_option("--prefetch-multiplier", dest="prefetch_multiplier",
                     type="int", default=conf.worker_prefetch_multiplier)
    parser.add_option_group(wopts)

    topts = OptionGroup(parser, "Pool Options")
    topts.add_option("-c", "--concurrency",
                     default=conf.worker_concurrency, type="int")
    topts.add_option("-P", "--pool",
                     default=conf.worker_pool, dest="pool_cls")
    topts.add_option("-E", "--events",
                     default=conf.worker_send_task_events,
                     action="store_true", dest="send_events")
    topts.add_option("--time-limit", type="float",
                     dest="task_time_limit", default=conf.task_time_limit)
    topts.add_option("--soft-time-limit", dest="task_soft_time_limit",
                     type="float", default=conf.task_soft_time_limit)
    topts.add_option("--maxtasksperchild", dest="max_tasks_per_child",
                     type="int", default=conf.worker_max_tasks_per_child)
    topts.add_option("--maxmemperchild", dest="max_memory_per_child",
                     type="int", default=conf.worker_max_memory_per_child)
    parser.add_option_group(topts)

    qopts = OptionGroup(parser, "Queue Options")
    qopts.add_option("--purge", "--discard", default=False,
                     action="store_true")
    qopts.add_option("--queues", "-Q", default=[])
    qopts.add_option("--exclude-queues", "-X", default=[])
    qopts.add_option("--include", "-I", default=[])
    parser.add_option_group(qopts)

    fopts = OptionGroup(parser, "Features")
    fopts.add_option("--autoscale")
    fopts.add_option("--autoreload", action="store_true")
    fopts.add_option("--without-gossip", action="store_true",
                     default=False)
    fopts.add_option("--without-mingle", action="store_true",
                     default=False)
    fopts.add_option("--without-heartbeat", action="store_true",
                     default=False)
    fopts.add_option("--heartbeat-interval", type="int")
    parser.add_option_group(fopts)

    daemon_options(parser)

    bopts = OptionGroup(parser, "Embedded Beat Options")
    bopts.add_option("-B", "--beat", action="store_true")
    bopts.add_option("-s", "--schedule", dest="schedule_filename",
                     default=conf.beat_schedule_filename)
    bopts.add_option("--scheduler", dest="scheduler_cls")
    parser.add_option_group(bopts)

    user_options = self.app.user_options["worker"]
    if user_options:
        uopts = OptionGroup(parser, "User Options")
        # BUG FIX: attribute is ``option_list`` (``options_list`` raised
        # AttributeError whenever user options were registered).
        uopts.option_list.extend(user_options)
        parser.add_option_group(uopts)
def add_arguments(self, parser):
    """Register all ``celery worker`` arguments on *parser* (argparse).

    Arguments are collected into titled argument groups so that the
    generated ``--help`` text is sectioned.
    """
    c = self.app.conf

    worker = parser.add_argument_group('Worker Options')
    worker.add_argument('-n', '--hostname')
    worker.add_argument('-D', '--detach',
                        action='store_true', default=False)
    worker.add_argument('-S', '--statedb', default=c.worker_state_db)
    worker.add_argument('-l', '--loglevel', default='WARN')
    worker.add_argument('-O', dest='optimization')
    worker.add_argument('--prefetch-multiplier', type=int,
                        default=c.worker_prefetch_multiplier)

    pool = parser.add_argument_group('Pool Options')
    pool.add_argument('-c', '--concurrency',
                      default=c.worker_concurrency, type=int)
    pool.add_argument('-P', '--pool', default=c.worker_pool)
    pool.add_argument('-E', '--task-events', '--events',
                      action='store_true',
                      default=c.worker_send_task_events)
    pool.add_argument('--time-limit', type=float,
                      default=c.task_time_limit)
    pool.add_argument('--soft-time-limit', type=float,
                      default=c.task_soft_time_limit)
    pool.add_argument('--max-tasks-per-child', '--maxtasksperchild',
                      type=int, default=c.worker_max_tasks_per_child)
    pool.add_argument('--max-memory-per-child', '--maxmemperchild',
                      type=int, default=c.worker_max_memory_per_child)

    queues = parser.add_argument_group('Queue Options')
    queues.add_argument('--purge', '--discard',
                        action='store_true', default=False)
    queues.add_argument('--queues', '-Q', default=[])
    queues.add_argument('--exclude-queues', '-X', default=[])
    queues.add_argument('--include', '-I', default=[])

    features = parser.add_argument_group('Features')
    # The three --without-* switches share the exact same shape.
    for switch in ('--without-gossip', '--without-mingle',
                   '--without-heartbeat'):
        features.add_argument(switch, action='store_true', default=False)
    features.add_argument('--heartbeat-interval', type=int)
    features.add_argument('--autoscale')

    # Shared daemonization arguments (--pidfile, --uid, ...).
    daemon_options(parser)

    beat = parser.add_argument_group('Embedded Beat Options')
    beat.add_argument('-B', '--beat', action='store_true', default=False)
    beat.add_argument('-s', '--schedule-filename', '--schedule',
                      default=c.beat_schedule_filename)
    beat.add_argument('--scheduler')

    user_options = self.app.user_options['worker']
    if user_options:
        self.add_compat_options(
            parser.add_argument_group('User Options'), user_options)
def prepare_arguments(self, parser):
    """Attach every ``celery worker`` option to *parser* (optparse).

    Options are registered in named groups so ``--help`` output stays
    organised; daemonization options come from ``daemon_options``.

    Fix: user options were appended to ``uopts.options_list`` — the
    optparse ``OptionGroup`` attribute is ``option_list``, so any app
    that declared ``user_options['worker']`` crashed with
    ``AttributeError``.
    """
    conf = self.app.conf

    wopts = OptionGroup(parser, 'Worker Options')
    wopts.add_option('-n', '--hostname')
    wopts.add_option('-D', '--detach', action='store_true')
    wopts.add_option('-S', '--statedb',
                     default=conf.worker_state_db, dest='state_db')
    wopts.add_option('-l', '--loglevel', default='WARN')
    wopts.add_option('-O', dest='optimization')
    wopts.add_option('--prefetch-multiplier', dest='prefetch_multiplier',
                     type='int', default=conf.worker_prefetch_multiplier)
    parser.add_option_group(wopts)

    topts = OptionGroup(parser, 'Pool Options')
    topts.add_option('-c', '--concurrency',
                     default=conf.worker_concurrency, type='int')
    topts.add_option('-P', '--pool',
                     default=conf.worker_pool, dest='pool_cls')
    topts.add_option('-E', '--events',
                     default=conf.worker_send_task_events,
                     action='store_true', dest='send_events')
    topts.add_option('--time-limit', type='float',
                     dest='task_time_limit', default=conf.task_time_limit)
    topts.add_option('--soft-time-limit', dest='task_soft_time_limit',
                     type='float', default=conf.task_soft_time_limit)
    topts.add_option('--maxtasksperchild', dest='max_tasks_per_child',
                     type='int', default=conf.worker_max_tasks_per_child)
    topts.add_option('--maxmemperchild', dest='max_memory_per_child',
                     type='int', default=conf.worker_max_memory_per_child)
    parser.add_option_group(topts)

    qopts = OptionGroup(parser, 'Queue Options')
    qopts.add_option('--purge', '--discard', default=False,
                     action='store_true')
    qopts.add_option('--queues', '-Q', default=[])
    qopts.add_option('--exclude-queues', '-X', default=[])
    qopts.add_option('--include', '-I', default=[])
    parser.add_option_group(qopts)

    fopts = OptionGroup(parser, 'Features')
    fopts.add_option('--autoscale')
    fopts.add_option('--autoreload', action='store_true')
    fopts.add_option('--without-gossip', action='store_true',
                     default=False)
    fopts.add_option('--without-mingle', action='store_true',
                     default=False)
    fopts.add_option('--without-heartbeat', action='store_true',
                     default=False)
    fopts.add_option('--heartbeat-interval', type='int')
    parser.add_option_group(fopts)

    daemon_options(parser)

    bopts = OptionGroup(parser, 'Embedded Beat Options')
    bopts.add_option('-B', '--beat', action='store_true')
    bopts.add_option('-s', '--schedule', dest='schedule_filename',
                     default=conf.beat_schedule_filename)
    bopts.add_option('--scheduler', dest='scheduler_cls')
    parser.add_option_group(bopts)

    user_options = self.app.user_options['worker']
    if user_options:
        uopts = OptionGroup(parser, 'User Options')
        # BUG FIX: attribute is ``option_list`` (``options_list`` raised
        # AttributeError whenever user options were registered).
        uopts.option_list.extend(user_options)
        parser.add_option_group(uopts)
def add_arguments(self, parser):
    """Register all ``celery worker`` arguments on *parser* (argparse).

    Arguments are collected into titled argument groups so the
    generated ``--help`` text is sectioned.
    """
    settings = self.app.conf

    grp = parser.add_argument_group("Worker Options")
    grp.add_argument("-n", "--hostname")
    grp.add_argument("-D", "--detach", action="store_true", default=False)
    grp.add_argument("-S", "--statedb", default=settings.worker_state_db)
    grp.add_argument("-l", "--loglevel", default="WARN")
    grp.add_argument("-O", dest="optimization")
    grp.add_argument("--prefetch-multiplier", type=int,
                     default=settings.worker_prefetch_multiplier)

    grp = parser.add_argument_group("Pool Options")
    grp.add_argument("-c", "--concurrency", type=int,
                     default=settings.worker_concurrency)
    grp.add_argument("-P", "--pool", default=settings.worker_pool)
    grp.add_argument("-E", "--task-events", "--events",
                     action="store_true",
                     default=settings.worker_send_task_events)
    grp.add_argument("--time-limit", type=float,
                     default=settings.task_time_limit)
    grp.add_argument("--soft-time-limit", type=float,
                     default=settings.task_soft_time_limit)
    grp.add_argument("--max-tasks-per-child", "--maxtasksperchild",
                     type=int, default=settings.worker_max_tasks_per_child)
    grp.add_argument("--max-memory-per-child", "--maxmemperchild",
                     type=int,
                     default=settings.worker_max_memory_per_child)

    grp = parser.add_argument_group("Queue Options")
    grp.add_argument("--purge", "--discard",
                     action="store_true", default=False)
    grp.add_argument("--queues", "-Q", default=[])
    grp.add_argument("--exclude-queues", "-X", default=[])
    grp.add_argument("--include", "-I", default=[])

    grp = parser.add_argument_group("Features")
    grp.add_argument("--without-gossip", action="store_true",
                     default=False)
    grp.add_argument("--without-mingle", action="store_true",
                     default=False)
    grp.add_argument("--without-heartbeat", action="store_true",
                     default=False)
    grp.add_argument("--heartbeat-interval", type=int)
    grp.add_argument("--autoscale")

    # Shared daemonization arguments (--pidfile, --uid, ...).
    daemon_options(parser)

    grp = parser.add_argument_group("Embedded Beat Options")
    grp.add_argument("-B", "--beat", action="store_true", default=False)
    grp.add_argument("-s", "--schedule-filename", "--schedule",
                     default=settings.beat_schedule_filename)
    grp.add_argument("--scheduler")

    extra = self.app.user_options["worker"]
    if extra:
        self.add_compat_options(
            parser.add_argument_group("User Options"), extra)
class EvCommand(Command):
    """``celery events`` command.

    Dispatches between three modes: dumping events to stdout (``-d``),
    running a snapshot camera (``-c``), or the curses top-like monitor
    (the default).
    """

    doc = __doc__
    supports_args = False
    preload_options = (Command.preload_options +
                       daemon_options(default_pidfile="celeryev.pid"))

    def run(self, dump=False, camera=None, frequency=1.0, maxrate=None,
            loglevel="INFO", logfile=None, prog_name="celeryev",
            pidfile=None, uid=None, gid=None, umask=None,
            working_directory=None, detach=False, **kwargs):
        """Pick a mode from the parsed options and delegate to it."""
        self.prog_name = prog_name
        if dump:
            return self.run_evdump()
        if camera:
            return self.run_evcam(camera, freq=frequency, maxrate=maxrate,
                                  loglevel=loglevel, logfile=logfile,
                                  pidfile=pidfile, uid=uid, gid=gid,
                                  umask=umask,
                                  working_directory=working_directory,
                                  detach=detach)
        return self.run_evtop()

    def prepare_preload_options(self, options):
        """Honour --workdir before any other option takes effect."""
        cwd = options.get("working_directory")
        if cwd:
            os.chdir(cwd)

    def run_evdump(self):
        # Imported lazily so the dumper is only loaded in this mode.
        from celery.events.dumper import evdump
        self.set_process_status("dump")
        return evdump(app=self.app)

    def run_evtop(self):
        # Imported lazily so curses is only loaded in this mode.
        from celery.events.cursesmon import evtop
        self.set_process_status("top")
        return evtop(app=self.app)

    def run_evcam(self, camera, logfile=None, pidfile=None, uid=None,
                  gid=None, umask=None, working_directory=None,
                  detach=False, **kwargs):
        """Start the snapshot camera, optionally as a daemon."""
        from celery.events.snapshot import evcam
        self.set_process_status("cam")
        kwargs["app"] = self.app
        start = partial(evcam, camera,
                        logfile=logfile, pidfile=pidfile, **kwargs)
        if not detach:
            return start()
        with detached(logfile, pidfile, uid, gid, umask,
                      working_directory):
            return start()

    def set_process_status(self, prog, info=""):
        """Update the process title shown by ``ps``/``top``."""
        return set_process_title(
            "%s:%s" % (self.prog_name, prog),
            info="%s %s" % (info, strargv(sys.argv)),
        )

    def get_options(self):
        return (
            Option('-d', '--dump', action="store_true"),
            Option('-c', '--camera'),
            Option('--detach', action="store_true"),
            Option('-F', '--frequency', '--freq',
                   type="float", default=1.0),
            Option('-r', '--maxrate'),
            Option('-l', '--loglevel', default="INFO"),
        )