def parse_url(url):
    port = path = auth = userid = password = None
    scheme = urlparse(url).scheme
    parts = urlparse(url.replace("%s://" % (scheme, ), "http://"))

    # The first pymongo.Connection() argument (host) can be
    # a mongodb connection URI. If this is the case, don't
    # use port but let pymongo get the port(s) from the URI instead.
    # This enables the use of replica sets and sharding.
    # See pymongo.Connection() for more info.
    if scheme != 'mongodb':
        netloc = parts.netloc
        if '@' in netloc:
            auth, _, netloc = partition(parts.netloc, '@')
            userid, _, password = partition(auth, ':')
        hostname, _, port = partition(netloc, ':')
        path = parts.path or ""
        if path and path[0] == '/':
            path = path[1:]
        port = port and int(port) or port
    else:
        # strip the scheme since it is appended automatically
        hostname = url[len('mongodb://'):]

    return dict(
        {
            "hostname": hostname,
            "port": port or None,
            "userid": userid or None,
            "password": password or None,
            "transport": scheme,
            "virtual_host": path or None
        },
        **kwdict(dict(parse_qsl(parts.query))))
def parse_url(url):
    port = path = auth = userid = password = None
    scheme = urlparse(url).scheme
    parts = urlparse(url.replace("%s://" % (scheme, ), "http://"))

    # The first pymongo.Connection() argument (host) can be
    # a mongodb connection URI. If this is the case, don't
    # use port but let pymongo get the port(s) from the URI instead.
    # This enables the use of replica sets and sharding.
    # See pymongo.Connection() for more info.
    if scheme != 'mongodb':
        netloc = parts.netloc
        if '@' in netloc:
            auth, _, netloc = partition(parts.netloc, '@')
            userid, _, password = partition(auth, ':')
        hostname, _, port = partition(netloc, ':')
        path = parts.path or ""
        if path and path[0] == '/':
            path = path[1:]
        port = port and int(port) or port
    else:
        # strip the scheme since it is appended automatically
        hostname = url[len('mongodb://'):]

    return dict({"hostname": hostname,
                 "port": port or None,
                 "userid": userid or None,
                 "password": password or None,
                 "transport": scheme,
                 "virtual_host": path or None},
                **kwdict(dict(parse_qsl(parts.query))))
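# Hedged usage sketch (not part of the original module): assuming the
# parse_url() above and its partition/kwdict/urlparse/parse_qsl helpers are
# in scope, an AMQP-style URL is broken into connection parameters, while a
# mongodb:// URL keeps everything after the scheme so pymongo can resolve
# hosts and ports (e.g. replica sets) itself.
if __name__ == "__main__":
    amqp = parse_url("amqp://guest:secret@localhost:5672/myvhost")
    assert amqp["hostname"] == "localhost"
    assert amqp["port"] == 5672
    assert amqp["userid"] == "guest"
    assert amqp["password"] == "secret"
    assert amqp["virtual_host"] == "myvhost"

    mongo = parse_url("mongodb://h1:27017,h2:27017/db")
    assert mongo["hostname"] == "h1:27017,h2:27017/db"
    assert mongo["port"] is None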
def __init__(self, concurrency=None, loglevel=None, logfile=None,
             hostname=None, discard=False, run_clockservice=False,
             schedule=None, task_time_limit=None, task_soft_time_limit=None,
             max_tasks_per_child=None, queues=None, events=False, db=None,
             include=None, app=None, pidfile=None, redirect_stdouts=None,
             redirect_stdouts_level=None, autoscale=None, scheduler_cls=None,
             pool=None, **kwargs):
    self.app = app = app_or_default(app)
    self.concurrency = (concurrency or
                        app.conf.CELERYD_CONCURRENCY or
                        multiprocessing.cpu_count())
    self.loglevel = loglevel or app.conf.CELERYD_LOG_LEVEL
    self.logfile = logfile or app.conf.CELERYD_LOG_FILE
    self.hostname = hostname or socket.gethostname()
    self.discard = discard
    self.run_clockservice = run_clockservice
    if self.app.IS_WINDOWS and self.run_clockservice:
        self.die("-B option does not work on Windows. "
                 "Please run celerybeat as a separate service.")
    self.schedule = schedule or app.conf.CELERYBEAT_SCHEDULE_FILENAME
    self.scheduler_cls = scheduler_cls or app.conf.CELERYBEAT_SCHEDULER
    self.events = events
    self.task_time_limit = (task_time_limit or
                            app.conf.CELERYD_TASK_TIME_LIMIT)
    self.task_soft_time_limit = (task_soft_time_limit or
                                 app.conf.CELERYD_TASK_SOFT_TIME_LIMIT)
    self.max_tasks_per_child = (max_tasks_per_child or
                                app.conf.CELERYD_MAX_TASKS_PER_CHILD)
    self.redirect_stdouts = (redirect_stdouts or
                             app.conf.CELERY_REDIRECT_STDOUTS)
    self.redirect_stdouts_level = (redirect_stdouts_level or
                                   app.conf.CELERY_REDIRECT_STDOUTS_LEVEL)
    self.pool = (pool or app.conf.CELERYD_POOL)
    self.db = db
    self.use_queues = queues or []
    self.queues = None
    self.include = include or []
    self.pidfile = pidfile
    self.autoscale = None
    if autoscale:
        max_c, _, min_c = partition(autoscale, ",")
        self.autoscale = [int(max_c), min_c and int(min_c) or 0]
    self._isatty = sys.stdout.isatty()
    self.colored = app.log.colored(self.logfile)

    if isinstance(self.use_queues, basestring):
        self.use_queues = self.use_queues.split(",")
    if isinstance(self.include, basestring):
        self.include = self.include.split(",")

    if not isinstance(self.loglevel, int):
        try:
            self.loglevel = LOG_LEVELS[self.loglevel.upper()]
        except KeyError:
            self.die("Unknown level %r. Please use one of %s." % (
                self.loglevel,
                "|".join(l for l in LOG_LEVELS.keys()
                         if isinstance(l, basestring))))
def _dispatch_event(self, event):
    self.event_count += 1
    event = kwdict(event)
    group, _, type = partition(event.pop("type"), "-")
    self.group_handlers[group](type, event)
    if self.event_callback:
        self.event_callback(self, event)
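# Hedged illustration (standalone; the event type strings are examples, not
# taken from the source, and str.partition stands in for the partition()
# compat helper used above): the dispatcher splits the event type on the
# first "-", so the prefix selects the group handler and the remainder is
# passed on as the concrete type.
def _demo_split_event_type(event_type):
    group, _, etype = event_type.partition("-")
    return group, etype

assert _demo_split_event_type("task-succeeded") == ("task", "succeeded")
assert _demo_split_event_type("worker-heartbeat") == ("worker", "heartbeat")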
def completenames(self, text, *ignored):
    """Return all commands starting with `text`, for tab-completion."""
    names = self.get_names()
    first = [cmd for cmd in names
             if cmd.startswith(text.replace("_", "."))]
    if first:
        return first
    return [cmd for cmd in names
            if partition(cmd, ".")[2].startswith(text)]
def rate(rate):
    """Parses rate strings, such as `"100/m"` or `"2/h"`
    and converts them to seconds."""
    if rate:
        if isinstance(rate, basestring):
            ops, _, modifier = partition(rate, "/")
            return RATE_MODIFIER_MAP[modifier or "s"](int(ops)) or 0
        return rate or 0
    return 0
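# Hedged sketch: RATE_MODIFIER_MAP is not shown above; a map consistent with
# how rate() is used (operations per second) could look like this (assumed,
# not copied from the source):
_RATE_MODIFIER_MAP = {"s": lambda n: n,
                      "m": lambda n: n / 60.0,
                      "h": lambda n: n / 60.0 / 60.0}
# With such a map, rate("100/m") would return about 1.67 and rate("2/h")
# about 0.00056, while rate(None) and rate(0) return 0.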
def parse_url(url):
    auth = userid = password = None
    scheme = urlparse(url).scheme
    parts = urlparse(url.replace("%s://" % (scheme, ), "http://"))
    netloc = parts.netloc
    if '@' in netloc:
        auth, _, netloc = partition(parts.netloc, '@')
        userid, _, password = partition(auth, ':')
    hostname, _, port = partition(netloc, ':')
    path = parts.path or ""
    if path and path[0] == '/':
        path = path[path.index('/') + 1:]
    return dict({"hostname": hostname,
                 "port": port and int(port) or None,
                 "userid": userid or None,
                 "password": password or None,
                 "transport": scheme,
                 "virtual_host": path or None},
                **kwdict(dict(parse_qsl(parts.query))))
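# Hedged usage sketch (assumes this parse_url() and its helpers are in
# scope): unlike the mongodb-aware variant above, every scheme takes the
# same path here, and the port is coerced to an int (or None) directly in
# the returned dict.
if __name__ == "__main__":
    redis = parse_url("redis://localhost:6379/0")
    assert (redis["port"], redis["virtual_host"]) == (6379, "0")
    assert parse_url("redis://localhost")["port"] is None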
def completenames(self, text, *ignored):
    """Return all commands starting with `text`, for tab-completion."""
    names = self.get_names()
    first = [
        cmd for cmd in names if cmd.startswith(text.replace("_", "."))
    ]
    if first:
        return first
    return [
        cmd for cmd in names if partition(cmd, ".")[2].startswith(text)
    ]
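# Hedged, standalone illustration of the two matching passes above (the
# command names are made up; str.partition stands in for the partition()
# helper): the first pass matches full names with "_" treated as ".", the
# fallback matches only the part after the first ".".
_names = ["exchange.declare", "exchange.delete", "queue.declare"]
assert [c for c in _names
        if c.startswith("exchange_de".replace("_", "."))] == \
    ["exchange.declare", "exchange.delete"]
assert [c for c in _names
        if c.partition(".")[2].startswith("del")] == ["exchange.delete"]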
def __init__(self, expires=None, backend=None, options={}, **kwargs):
    super(CacheBackend, self).__init__(self, **kwargs)
    self.expires = expires or self.app.conf.CELERY_TASK_RESULT_EXPIRES
    if isinstance(self.expires, timedelta):
        self.expires = timeutils.timedelta_seconds(self.expires)
    self.options = dict(self.app.conf.CELERY_CACHE_BACKEND_OPTIONS,
                        **options)
    backend = backend or self.app.conf.CELERY_CACHE_BACKEND
    self.expires = int(self.expires)
    self.backend, _, servers = partition(backend, "://")
    self.servers = servers.split(";")
    try:
        self.Client = backends[self.backend]
    except KeyError:
        raise ImproperlyConfigured(
            "Unknown cache backend: %s. Please use one of the "
            "following backends: %s" % (self.backend,
                                        ", ".join(backends.keys())))
def __init__(self, expires=None, backend=None, options={}, **kwargs):
    super(CacheBackend, self).__init__(self, **kwargs)
    self.expires = expires or self.app.conf.CELERY_TASK_RESULT_EXPIRES
    if isinstance(self.expires, timedelta):
        self.expires = timeutils.timedelta_seconds(self.expires)
    self.options = dict(self.app.conf.CELERY_CACHE_BACKEND_OPTIONS,
                        **options)
    backend = backend or self.app.conf.CELERY_CACHE_BACKEND
    self.expires = int(self.expires)
    self.backend, _, servers = partition(backend, "://")
    self.servers = servers.rstrip('/').split(";")
    try:
        self.Client = backends[self.backend]()
    except KeyError:
        raise ImproperlyConfigured(
            "Unknown cache backend: %s. Please use one of the "
            "following backends: %s" % (self.backend,
                                        ", ".join(backends.keys())))
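# Hedged sketch of the backend-string handling above (the URL is an example,
# not from the source; str.partition stands in for the partition() helper):
# the part before "://" selects the client class, the remainder is split on
# ";" into a server list, and the second variant also strips a trailing "/".
_backend = "memcache://127.0.0.1:11211;192.168.0.2:11211/"
_name, _, _servers = _backend.partition("://")
assert _name == "memcache"
assert _servers.rstrip("/").split(";") == ["127.0.0.1:11211",
                                           "192.168.0.2:11211"]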
def __init__(self, concurrency=None, loglevel=None, logfile=None,
             hostname=None, discard=False, run_clockservice=False,
             schedule=None, task_time_limit=None, task_soft_time_limit=None,
             max_tasks_per_child=None, queues=None, events=False, db=None,
             include=None, app=None, pidfile=None, redirect_stdouts=None,
             redirect_stdouts_level=None, autoscale=None, scheduler_cls=None,
             pool=None, **kwargs):
    self.app = app = app_or_default(app)
    self.concurrency = (concurrency or
                        app.conf.CELERYD_CONCURRENCY or
                        cpu_count())
    self.loglevel = loglevel or app.conf.CELERYD_LOG_LEVEL
    self.logfile = logfile or app.conf.CELERYD_LOG_FILE
    self.hostname = hostname or socket.gethostname()
    self.discard = discard
    self.run_clockservice = run_clockservice
    if self.app.IS_WINDOWS and self.run_clockservice:
        self.die("-B option does not work on Windows. "
                 "Please run celerybeat as a separate service.")
    self.schedule = schedule or app.conf.CELERYBEAT_SCHEDULE_FILENAME
    self.scheduler_cls = scheduler_cls or app.conf.CELERYBEAT_SCHEDULER
    self.events = events
    self.task_time_limit = (task_time_limit or
                            app.conf.CELERYD_TASK_TIME_LIMIT)
    self.task_soft_time_limit = (task_soft_time_limit or
                                 app.conf.CELERYD_TASK_SOFT_TIME_LIMIT)
    self.max_tasks_per_child = (max_tasks_per_child or
                                app.conf.CELERYD_MAX_TASKS_PER_CHILD)
    self.redirect_stdouts = (redirect_stdouts or
                             app.conf.CELERY_REDIRECT_STDOUTS)
    self.redirect_stdouts_level = (redirect_stdouts_level or
                                   app.conf.CELERY_REDIRECT_STDOUTS_LEVEL)
    self.pool = (pool or app.conf.CELERYD_POOL)
    self.db = db
    self.use_queues = queues or []
    self.queues = None
    self.include = include or []
    self.pidfile = pidfile
    self.autoscale = None
    if autoscale:
        max_c, _, min_c = partition(autoscale, ",")
        self.autoscale = [int(max_c), min_c and int(min_c) or 0]
    self._isatty = sys.stdout.isatty()
    self.colored = app.log.colored(self.logfile)

    if isinstance(self.use_queues, basestring):
        self.use_queues = self.use_queues.split(",")
    if isinstance(self.include, basestring):
        self.include = self.include.split(",")

    if not isinstance(self.loglevel, int):
        try:
            self.loglevel = LOG_LEVELS[self.loglevel.upper()]
        except KeyError:
            self.die(
                "Unknown level %r. Please use one of %s." %
                (self.loglevel,
                 "|".join(l for l in LOG_LEVELS.keys()
                          if isinstance(l, basestring))))
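# Hedged illustration of the option normalisation above (values are made up;
# str.partition stands in for the partition() helper): "--autoscale=max,min"
# becomes a [max, min] pair with min defaulting to 0, and comma-separated
# queue/include strings are split into lists.
_max_c, _, _min_c = "10,3".partition(",")
assert [int(_max_c), _min_c and int(_min_c) or 0] == [10, 3]
_max_c, _, _min_c = "10".partition(",")
assert [int(_max_c), _min_c and int(_min_c) or 0] == [10, 0]
assert "celery,images,video".split(",") == ["celery", "images", "video"]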