def multi_args(p, cmd='celery worker', append='', prefix='', suffix=''):
    names = p.values
    options = dict(p.options)
    passthrough = p.passthrough
    ranges = len(names) == 1
    if ranges:
        try:
            noderange = int(names[0])
        except ValueError:
            pass
        else:
            names = [str(n) for n in range(1, noderange + 1)]
            prefix = 'celery'
    cmd = options.pop('--cmd', cmd)
    append = options.pop('--append', append)
    hostname = options.pop('--hostname',
                           options.pop('-n', socket.gethostname()))
    prefix = options.pop('--prefix', prefix) or ''
    suffix = options.pop('--suffix', suffix) or hostname
    if suffix in ('""', "''"):
        suffix = ''

    for ns_name, ns_opts in list(items(p.namespaces)):
        if ',' in ns_name or (ranges and '-' in ns_name):
            for subns in parse_ns_range(ns_name, ranges):
                p.namespaces[subns].update(ns_opts)
            p.namespaces.pop(ns_name)

    # Numbers in args always refers to the index in the list of names.
    # (e.g. `start foo bar baz -c:1` where 1 is foo, 2 is bar, and so on).
    for ns_name, ns_opts in list(items(p.namespaces)):
        if ns_name.isdigit():
            ns_index = int(ns_name) - 1
            if ns_index < 0:
                raise KeyError('Indexes start at 1 got: %r' % (ns_name, ))
            try:
                p.namespaces[names[ns_index]].update(ns_opts)
            except IndexError:
                raise KeyError('No node at index %r' % (ns_name, ))

    for name in names:
        this_suffix = suffix
        if '@' in name:
            this_name = options['-n'] = name
            nodename, this_suffix = nodesplit(name)
            name = nodename
        else:
            nodename = '%s%s' % (prefix, name)
            this_name = options['-n'] = '%s@%s' % (nodename, this_suffix)
        expand = abbreviations({'%h': this_name, '%n': name,
                                '%N': nodename, '%d': this_suffix})
        argv = ([expand(cmd)] +
                [format_opt(opt, expand(value))
                 for opt, value in items(p.optmerge(name, options))] +
                [passthrough])
        if append:
            argv.append(expand(append))
        yield multi_args_t(this_name, argv, expand, name)
def assert_next_task_data_equal(self, consumer, presult, task_name,
                                test_eta=False, test_expires=False,
                                properties=None, headers=None, **kwargs):
    next_task = consumer.queues[0].get(accept=['pickle', 'json'])
    task_properties = next_task.properties
    task_headers = next_task.headers
    task_body = next_task.decode()
    task_args, task_kwargs, embed = task_body
    assert task_headers['id'] == presult.id
    assert task_headers['task'] == task_name
    if test_eta:
        assert isinstance(task_headers.get('eta'), string_t)
        to_datetime = parse_iso8601(task_headers.get('eta'))
        assert isinstance(to_datetime, datetime)
    if test_expires:
        assert isinstance(task_headers.get('expires'), string_t)
        to_datetime = parse_iso8601(task_headers.get('expires'))
        assert isinstance(to_datetime, datetime)
    properties = properties or {}
    for arg_name, arg_value in items(properties):
        assert task_properties.get(arg_name) == arg_value
    headers = headers or {}
    for arg_name, arg_value in items(headers):
        assert task_headers.get(arg_name) == arg_value
    for arg_name, arg_value in items(kwargs):
        assert task_kwargs.get(arg_name) == arg_value
def test_utf8dict(self): uk = "foobar" d = {"følelser ær langé": "ærbadægzaååÆØÅ", from_utf8(uk): from_utf8("xuzzybaz")} for key, value in items(http.utf8dict(items(d))): self.assertIsInstance(key, str) self.assertIsInstance(value, str)
def humanize(self): """Returns a human readable string showing changes to the configuration.""" return '\n'.join( '{0}: {1}'.format(key, pretty(value, width=50)) for key, value in items(filter_hidden_settings(dict( (k, v) for k, v in items(self.without_defaults()) if k.isupper() and not k.startswith('_')))))
def multi_args(p, cmd="celery worker", append="", prefix="", suffix=""): names = p.values options = dict(p.options) passthrough = p.passthrough ranges = len(names) == 1 if ranges: try: noderange = int(names[0]) except ValueError: pass else: names = [str(n) for n in range(1, noderange + 1)] prefix = "celery" cmd = options.pop("--cmd", cmd) append = options.pop("--append", append) hostname = options.pop("--hostname", options.pop("-n", socket.gethostname())) prefix = options.pop("--prefix", prefix) or "" suffix = options.pop("--suffix", suffix) or hostname if suffix in ('""', "''"): suffix = "" for ns_name, ns_opts in list(items(p.namespaces)): if "," in ns_name or (ranges and "-" in ns_name): for subns in parse_ns_range(ns_name, ranges): p.namespaces[subns].update(ns_opts) p.namespaces.pop(ns_name) # Numbers in args always refers to the index in the list of names. # (e.g. `start foo bar baz -c:1` where 1 is foo, 2 is bar, and so on). for ns_name, ns_opts in list(items(p.namespaces)): if ns_name.isdigit(): ns_index = int(ns_name) - 1 if ns_index < 0: raise KeyError("Indexes start at 1 got: %r" % (ns_name,)) try: p.namespaces[names[ns_index]].update(ns_opts) except IndexError: raise KeyError("No node at index %r" % (ns_name,)) for name in names: hostname = suffix if "@" in name: nodename = options["-n"] = host_format(name) shortname, hostname = nodesplit(nodename) name = shortname else: shortname = "%s%s" % (prefix, name) nodename = options["-n"] = host_format("{0}@{1}".format(shortname, hostname)) expand = partial(node_format, nodename=nodename, N=shortname, d=hostname, h=nodename) argv = ( [expand(cmd)] + [format_opt(opt, expand(value)) for opt, value in items(p.optmerge(name, options))] + [passthrough] ) if append: argv.append(expand(append)) yield multi_args_t(nodename, argv, expand, name)
def sync(self, c):
    info('mingle: searching for neighbors')
    replies = self.send_hello(c)
    if replies:
        info('mingle: sync with %s nodes',
             len([reply for reply, value in items(replies) if value]))
        [self.on_node_reply(c, nodename, reply)
         for nodename, reply in items(replies) if reply]
        info('mingle: sync complete')
    else:
        info('mingle: all alone')
def start(self, c):
    info('mingle: searching for neighbors')
    I = c.app.control.inspect(timeout=1.0, connection=c.connection)
    our_revoked = c.controller.state.revoked
    replies = I.hello(c.hostname, our_revoked._data) or {}
    replies.pop(c.hostname, None)  # delete my own response
    if replies:
        info('mingle: sync with %s nodes',
             len([reply for reply, value in items(replies) if value]))
        [self.on_node_reply(c, nodename, reply)
         for nodename, reply in items(replies) if reply]
        info('mingle: sync complete')
    else:
        info('mingle: all alone')
def event(self, type_, timestamp=None, local_received=None, fields=None,
          precedence=states.precedence, items=items, dict=dict,
          PENDING=states.PENDING, RECEIVED=states.RECEIVED,
          STARTED=states.STARTED, FAILURE=states.FAILURE,
          RETRY=states.RETRY, SUCCESS=states.SUCCESS,
          REVOKED=states.REVOKED):
    fields = fields or {}
    if type_ == "sent":
        state, self.sent = PENDING, timestamp
    elif type_ == "received":
        state, self.received = RECEIVED, timestamp
    elif type_ == "started":
        state, self.started = STARTED, timestamp
    elif type_ == "failed":
        state, self.failed = FAILURE, timestamp
    elif type_ == "retried":
        state, self.retried = RETRY, timestamp
    elif type_ == "succeeded":
        state, self.succeeded = SUCCESS, timestamp
    elif type_ == "revoked":
        state, self.revoked = REVOKED, timestamp
    else:
        state = type_.upper()

    # note that precedence here is reversed
    # see implementation in celery.states.state.__lt__
    if (state != RETRY and self.state != RETRY and
            precedence(state) > precedence(self.state)):
        # this state logically happens-before the current state, so merge.
        keep = self.merge_rules.get(state)
        if keep is not None:
            fields = {k: v for k, v in items(fields) if k in keep}
        for key, value in items(fields):
            setattr(self, key, value)
    else:
        self.state = state
        self.timestamp = timestamp
        for key, value in items(fields):
            setattr(self, key, value)
def __new__(cls, name, bases, attrs):
    attrs['__confopts__'] = dict((attr, spec.get_key(attr))
                                 for attr, spec in items(attrs)
                                 if isinstance(spec, from_config))
    inherit_from = attrs.get('inherit_confopts', ())
    for subcls in bases:
        try:
            attrs['__confopts__'].update(subcls.__confopts__)
        except AttributeError:
            pass
    for subcls in inherit_from:
        attrs['__confopts__'].update(subcls.__confopts__)
    attrs = dict((k, v if not isinstance(v, from_config) else None)
                 for k, v in items(attrs))
    return super(_configurated, cls).__new__(cls, name, bases, attrs)
def multi_args(p, cmd='celery worker', append='', prefix='', suffix=''):
    names = p.values
    options = dict(p.options)
    passthrough = p.passthrough
    ranges = len(names) == 1
    if ranges:
        try:
            noderange = int(names[0])
        except ValueError:
            pass
        else:
            names = [str(n) for n in range(1, noderange + 1)]
            prefix = 'celery'
    cmd = options.pop('--cmd', cmd)
    append = options.pop('--append', append)
    hostname = options.pop('--hostname',
                           options.pop('-n', socket.gethostname()))
    prefix = options.pop('--prefix', prefix) or ''
    suffix = options.pop('--suffix', suffix) or hostname
    if suffix in ('""', "''"):
        suffix = ''

    for ns_name, ns_opts in list(items(p.namespaces)):
        if ',' in ns_name or (ranges and '-' in ns_name):
            for subns in parse_ns_range(ns_name, ranges):
                p.namespaces[subns].update(ns_opts)
            p.namespaces.pop(ns_name)

    for name in names:
        this_suffix = suffix
        if '@' in name:
            this_name = options['-n'] = name
            nodename, this_suffix = nodesplit(name)
            name = nodename
        else:
            nodename = '%s%s' % (prefix, name)
            this_name = options['-n'] = '%s@%s' % (nodename, this_suffix)
        expand = abbreviations({'%h': this_name, '%n': name,
                                '%N': nodename, '%d': this_suffix})
        argv = ([expand(cmd)] +
                [format_opt(opt, expand(value))
                 for opt, value in items(p.optmerge(name, options))] +
                [passthrough])
        if append:
            argv.append(expand(append))
        yield this_name, argv, expand
def test_get_many(self):
    for is_dict in True, False:
        self.b.mget_returns_dict = is_dict
        ids = {uuid(): i for i in range(10)}
        for id, i in items(ids):
            self.b.mark_as_done(id, i)
        it = self.b.get_many(list(ids), interval=0.01)
        for i, (got_id, got_state) in enumerate(it):
            assert got_state['result'] == ids[got_id]
        assert i == 9
        assert list(self.b.get_many(list(ids), interval=0.01))

        self.b._cache.clear()
        callback = Mock(name='callback')
        it = self.b.get_many(
            list(ids),
            on_message=callback,
            interval=0.05,
        )
        for i, (got_id, got_state) in enumerate(it):
            assert got_state['result'] == ids[got_id]
        assert i == 9
        assert list(
            self.b.get_many(list(ids), interval=0.01)
        )
        callback.assert_has_calls([
            call(ANY) for id in ids
        ])
def assertDictContainsSubset(self, expected, actual, msg=None):
    missing, mismatched = [], []
    for key, value in items(expected):
        if key not in actual:
            missing.append(key)
        elif value != actual[key]:
            mismatched.append('%s, expected: %s, actual: %s' % (
                safe_repr(key), safe_repr(value),
                safe_repr(actual[key])))
    if not (missing or mismatched):
        return
    standard_msg = ''
    if missing:
        standard_msg = 'Missing: %s' % ','.join(map(safe_repr, missing))
    if mismatched:
        if standard_msg:
            standard_msg += '; '
        standard_msg += 'Mismatched values: %s' % (
            ','.join(mismatched))
    self.fail(self._formatMessage(msg, standard_msg))
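# A usage sketch for assertDictContainsSubset above (hypothetical values,
# called from inside a test case):
#
#   self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})  # passes
#   self.assertDictContainsSubset({'a': 2}, {'a': 1, 'b': 2})
#   # fails with: Mismatched values: 'a', expected: 2, actual: 1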
def to_dot(self, fh, formatter=None):
    """Convert the graph to DOT format.

    :param fh: A file, or a file-like object to write the graph to.

    """
    seen = set()
    draw = formatter or self.formatter

    def P(s):
        print(bytes_to_str(s), file=fh)

    def if_not_seen(fun, obj):
        if draw.label(obj) not in seen:
            P(fun(obj))
            seen.add(draw.label(obj))

    P(draw.head())
    for obj, adjacent in items(self):
        if not adjacent:
            if_not_seen(draw.terminal_node, obj)
        for req in adjacent:
            if_not_seen(draw.node, obj)
            P(draw.edge(obj, req))
    P(draw.tail())
def _find_worker_queues(self, proc):
    """Find the queues owned by ``proc``."""
    try:
        return next(q for q, owner in items(self._queues)
                    if owner == proc)
    except StopIteration:
        raise ValueError(proc)
def table(self, with_defaults=False, censored=True):
    filt = filter_hidden_settings if censored else lambda v: v
    return filt({
        k: v for k, v in items(
            self if with_defaults else self.without_defaults())
        if k.isupper() and not k.startswith('_')
    })
def get_many(self, task_ids, timeout=None, interval=0.5):
    ids = set(task_ids)
    cached_ids = set()
    for task_id in ids:
        try:
            cached = self._cache[task_id]
        except KeyError:
            pass
        else:
            if cached['status'] in states.READY_STATES:
                yield bytes_to_str(task_id), cached
                cached_ids.add(task_id)

    ids.difference_update(cached_ids)
    iterations = 0
    while ids:
        keys = list(ids)
        r = self._mget_to_results(self.mget([self.get_key_for_task(k)
                                             for k in keys]), keys)
        self._cache.update(r)
        ids.difference_update(set(map(bytes_to_str, r)))
        for key, value in items(r):
            yield bytes_to_str(key), value
        if timeout and iterations * interval >= timeout:
            raise TimeoutError('Operation timed out ({0})'.format(timeout))
        time.sleep(interval)  # don't busy loop.
        iterations += 1
def annotate(self):
    for d in resolve_all_annotations(self.app.annotations, self):
        for key, value in items(d):
            if key.startswith('@'):
                self.add_around(key[1:], value)
            else:
                setattr(self, key, value)
def process_initializer(app, hostname): """Pool child process initializer.""" platforms.signals.reset(*WORKER_SIGRESET) platforms.signals.ignore(*WORKER_SIGIGNORE) platforms.set_mp_process_title("celeryd", hostname=hostname) # This is for Windows and other platforms not supporting # fork(). Note that init_worker makes sure it's only # run once per process. app.loader.init_worker() app.loader.init_worker_process() app.log.setup( int(os.environ.get("CELERY_LOG_LEVEL", 0)), os.environ.get("CELERY_LOG_FILE") or None, bool(os.environ.get("CELERY_LOG_REDIRECT", False)), str(os.environ.get("CELERY_LOG_REDIRECT_LEVEL")), ) if os.environ.get("FORKED_BY_MULTIPROCESSING"): # pool did execv after fork trace.setup_worker_optimizations(app) else: app.set_current() set_default_app(app) app.finalize() trace._tasks = app._tasks # enables fast_trace_task optimization. from celery.task.trace import build_tracer for name, task in items(app.tasks): task.__trace__ = build_tracer(name, task, app.loader, hostname) signals.worker_process_init.send(sender=None)
def process_initializer(app, hostname):
    """Pool child process initializer.

    This will initialize a child pool process to ensure the correct
    app instance is used and things like logging works.
    """
    platforms.signals.reset(*WORKER_SIGRESET)
    platforms.signals.ignore(*WORKER_SIGIGNORE)
    platforms.set_mp_process_title('celeryd', hostname=hostname)
    # This is for Windows and other platforms not supporting
    # fork().  Note that init_worker makes sure it's only
    # run once per process.
    app.loader.init_worker()
    app.loader.init_worker_process()
    app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0),
                  os.environ.get('CELERY_LOG_FILE') or None,
                  bool(os.environ.get('CELERY_LOG_REDIRECT', False)),
                  str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')))
    if os.environ.get('FORKED_BY_MULTIPROCESSING'):
        # pool did execv after fork
        trace.setup_worker_optimizations(app)
    else:
        app.set_current()
        set_default_app(app)
        app.finalize()
        trace._tasks = app._tasks  # enables fast_trace_task optimization.
    # rebuild execution handler for all tasks.
    from celery.app.trace import build_tracer
    for name, task in items(app.tasks):
        task.__trace__ = build_tracer(name, task, app.loader, hostname,
                                      app=app)
    signals.worker_process_init.send(sender=None)
def _apply_chord_incr(self, header, partial_args, group_id, body,
                      result=None, options={}, **kwargs):
    self.save_group(group_id, self.app.GroupResult(group_id, result))

    fixed_options = {k: v for k, v in items(options) if k != 'task_id'}

    return header(*partial_args, task_id=group_id, **fixed_options or {})
def extend_with_default_kwargs(self):
    """Extend the tasks keyword arguments with standard task arguments.

    Currently these are `logfile`, `loglevel`, `task_id`,
    `task_name`, `task_retries`, and `delivery_info`.

    See :meth:`celery.task.base.Task.run` for more information.

    Magic keyword arguments are deprecated and will be removed
    in version 4.0.

    """
    kwargs = dict(self.kwargs)
    default_kwargs = {'logfile': None,   # deprecated
                      'loglevel': None,  # deprecated
                      'task_id': self.id,
                      'task_name': self.name,
                      'task_retries': self.request_dict.get('retries', 0),
                      'task_is_eager': False,
                      'delivery_info': self.delivery_info}
    fun = self.task.run
    supported_keys = fun_takes_kwargs(fun, default_kwargs)
    extend_with = dict((key, val) for key, val in items(default_kwargs)
                       if key in supported_keys)
    kwargs.update(extend_with)
    return kwargs
def _info_for_commandclass(type_):
    from celery.worker.control import Panel
    return [
        (name, info)
        for name, info in items(Panel.meta)
        if info.type == type_
    ]
def make_request(self, url, method, params):
    """Perform HTTP request and return the response."""
    request = Request(url, str_to_bytes(params))
    for key, val in items(self.http_headers):
        request.add_header(key, val)
    response = urlopen(request)  # user catches errors.
    return response.read()
def patch_settings(app=None, **config):
    if app is None:
        from celery import current_app
        app = current_app
    prev = {}
    for key, value in items(config):
        try:
            prev[key] = getattr(app.conf, key)
        except AttributeError:
            pass
        setattr(app.conf, key, value)

    yield app.conf

    for key, value in items(prev):
        setattr(app.conf, key, value)
def get_many(self, task_ids, timeout=None, interval=0.5, no_ack=True,
             on_message=None, on_interval=None,
             READY_STATES=states.READY_STATES):
    interval = 0.5 if interval is None else interval
    ids = task_ids if isinstance(task_ids, set) else set(task_ids)
    cached_ids = set()
    cache = self._cache
    for task_id in ids:
        try:
            cached = cache[task_id]
        except KeyError:
            pass
        else:
            if cached['status'] in READY_STATES:
                yield bytes_to_str(task_id), cached
                cached_ids.add(task_id)

    ids.difference_update(cached_ids)
    iterations = 0
    while ids:
        keys = list(ids)
        r = self._mget_to_results(self.mget([self.get_key_for_task(k)
                                             for k in keys]), keys)
        cache.update(r)
        ids.difference_update({bytes_to_str(v) for v in r})
        for key, value in items(r):
            if on_message is not None:
                on_message(value)
            yield bytes_to_str(key), value
        if timeout and iterations * interval >= timeout:
            raise TimeoutError('Operation timed out ({0})'.format(timeout))
        if on_interval:
            on_interval()
        time.sleep(interval)  # don't busy loop.
        iterations += 1
def lpmerge(L, R):
    """In place left precedent dictionary merge.

    Keeps values from `L`, if the value in `R` is :const:`None`.

    """
    setitem = L.__setitem__
    [setitem(k, v) for k, v in items(R) if v is not None]
    return L
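# A minimal usage sketch for lpmerge above (hypothetical values; assumes
# items() is celery.five.items, i.e. dict.items() on Python 3): values
# from R overwrite L, except None values, which keep L's entry.
L = {'a': 1, 'b': 2}
R = {'a': 10, 'b': None, 'c': 3}
assert lpmerge(L, R) == {'a': 10, 'b': 2, 'c': 3}
assert L == {'a': 10, 'b': 2, 'c': 3}  # merged in place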
def cry(out=None, sepchr='=', seplen=49):  # pragma: no cover
    """Return stack-trace of all active threads,
    taken from https://gist.github.com/737056."""
    import threading

    out = WhateverIO() if out is None else out
    P = partial(print, file=out)

    # get a map of threads by their ID so we can print their names
    # during the traceback dump
    tmap = {t.ident: t for t in threading.enumerate()}

    sep = sepchr * seplen
    for tid, frame in items(sys._current_frames()):
        thread = tmap.get(tid)
        if not thread:
            # skip old junk (left-overs from a fork)
            continue
        P('{0.name}'.format(thread))
        P(sep)
        traceback.print_stack(frame, file=out)
        P(sep)
        P('LOCAL VARIABLES')
        P(sep)
        pprint(frame.f_locals, stream=out)
        P('\n')
    return out.getvalue()
def get_process_queues(self):
    """Get queues for a new process.

    Here we'll find an unused slot, as there should always
    be one available when we start a new process.
    """
    return next(q for q, owner in items(self._queues)
                if owner is None)
def update(self, other):
    """Update this set from other LimitedSet, dict or iterable."""
    if not other:
        return
    if isinstance(other, LimitedSet):
        self._data.update(other._data)
        self._refresh_heap()
        self.purge()
    elif isinstance(other, dict):
        # revokes are sent as a dict
        for key, inserted in items(other):
            if isinstance(inserted, (tuple, list)):
                # in case someone uses ._data directly for sending update
                inserted = inserted[0]
            if not isinstance(inserted, float):
                raise ValueError(
                    'Expecting float timestamp, got type '
                    '{0!r} with value: {1}'.format(
                        type(inserted), inserted))
            self.add(key, inserted)
    else:
        # XXX AVOID THIS, it could keep old data if more parties
        # exchange them all over and over again
        for obj in other:
            self.add(obj)
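# A usage sketch for LimitedSet.update above (assumes the celery 4.x
# import path; in 3.x the class lived in celery.datastructures):
import time
from celery.utils.collections import LimitedSet

s = LimitedSet(maxlen=10)
s.update({'task-id-1': time.time()})    # revokes arrive as {key: timestamp}
s.update(['task-id-2', 'task-id-3'])    # plain iterables also work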
def workers(self, *args, **kwargs):
    def simplearg(arg):
        return maybe_list(itemgetter(0, 2)(arg.partition(':')))

    def maybe_list(l, sep=','):
        return (l[0], l[1].split(sep) if sep in l[1] else l[1])

    args = dict(simplearg(arg) for arg in args)
    generic = 'generic' in args

    def generic_label(node):
        return '{0} ({1}://)'.format(
            type(node).__name__, node._label.split('://')[0])

    class Node(object):
        force_label = None
        scheme = {}

        def __init__(self, label, pos=None):
            self._label = label
            self.pos = pos

        def label(self):
            return self._label

        def __str__(self):
            return self.label()

    class Thread(Node):
        scheme = {
            'fillcolor': 'lightcyan4',
            'fontcolor': 'yellow',
            'shape': 'oval',
            'fontsize': 10,
            'width': 0.3,
            'color': 'black',
        }

        def __init__(self, label, **kwargs):
            self._label = 'thr-{0}'.format(next(tids))
            self.real_label = label
            self.pos = 0

    class Formatter(GraphFormatter):

        def label(self, obj):
            return obj and obj.label()

        def node(self, obj):
            scheme = dict(obj.scheme) if obj.pos else obj.scheme
            if isinstance(obj, Thread):
                scheme['label'] = obj.real_label
            return self.draw_node(
                obj, dict(self.node_scheme, **scheme),
            )

        def terminal_node(self, obj):
            return self.draw_node(
                obj, dict(self.term_scheme, **obj.scheme),
            )

        def edge(self, a, b, **attrs):
            if isinstance(a, Thread):
                attrs.update(arrowhead='none', arrowtail='tee')
            return self.draw_edge(a, b, self.edge_scheme, attrs)

    def subscript(n):
        S = {'0': '₀', '1': '₁', '2': '₂', '3': '₃',
             '4': '₄', '5': '₅', '6': '₆', '7': '₇',
             '8': '₈', '9': '₉'}
        return ''.join([S[i] for i in str(n)])

    class Worker(Node):
        pass

    class Backend(Node):
        scheme = {
            'shape': 'folder',
            'width': 2,
            'height': 1,
            'fillcolor': 'peachpuff3',
            'color': 'peachpuff4',
        }

        def label(self):
            return generic_label(self) if generic else self._label

    class Broker(Node):
        scheme = {
            'shape': 'circle',
            'fillcolor': 'cadetblue3',
            'color': 'cadetblue4',
            'height': 1,
        }

        def label(self):
            return generic_label(self) if generic else self._label

    from itertools import count
    tids = count(1)
    Wmax = int(args.get('wmax', 4) or 0)
    Tmax = int(args.get('tmax', 3) or 0)

    def maybe_abbr(l, name, max=Wmax):
        size = len(l)
        abbr = max and size > max
        if 'enumerate' in args:
            l = ['{0}{1}'.format(name, subscript(i + 1))
                 for i, obj in enumerate(l)]
        if abbr:
            l = l[0:max - 1] + [l[size - 1]]
            l[max - 2] = '{0}⎨…{1}⎬'.format(
                name[0], subscript(size - (max - 1)))
        return l

    try:
        workers = args['nodes']
        threads = args.get('threads') or []
    except KeyError:
        replies = self.app.control.inspect().stats()
        workers, threads = [], []
        for worker, reply in items(replies):
            workers.append(worker)
            threads.append(reply['pool']['max-concurrency'])

    wlen = len(workers)
    backend = args.get('backend', self.app.conf.result_backend)
    threads_for = {}
    workers = maybe_abbr(workers, 'Worker')
    if Wmax and wlen > Wmax:
        threads = threads[0:3] + [threads[-1]]
    for i, threads in enumerate(threads):
        threads_for[workers[i]] = maybe_abbr(
            list(range(int(threads))), 'P', Tmax,
        )

    broker = Broker(args.get(
        'broker', self.app.connection_for_read().as_uri()))
    backend = Backend(backend) if backend else None
    graph = DependencyGraph(formatter=Formatter())
    graph.add_arc(broker)
    if backend:
        graph.add_arc(backend)
    curworker = [0]
    for i, worker in enumerate(workers):
        worker = Worker(worker, pos=i)
        graph.add_arc(worker)
        graph.add_edge(worker, broker)
        if backend:
            graph.add_edge(worker, backend)
        threads = threads_for.get(worker._label)
        if threads:
            for thread in threads:
                thread = Thread(thread)
                graph.add_arc(thread)
                graph.add_edge(thread, worker)
        curworker[0] += 1

    graph.to_dot(self.stdout)
def _update_model_with_dict(self, obj, fields):
    [setattr(obj, attr_name, attr_value)
     for attr_name, attr_value in items(fields)]
    obj.save()
    return obj
def draw(self):
    with self.lock:
        win = self.win
        self.handle_keypress()
        x = LEFT_BORDER_OFFSET
        y = blank_line = count(2)
        my, mx = win.getmaxyx()
        win.erase()
        win.bkgd(' ', curses.color_pair(1))
        win.border()
        win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5))
        next(blank_line)
        win.addstr(next(y), x, self.format_row('UUID', 'TASK',
                                               'WORKER', 'TIME', 'STATE'),
                   curses.A_BOLD | curses.A_UNDERLINE)
        tasks = self.tasks
        if tasks:
            for row, (uuid, task) in enumerate(tasks):
                if row > self.display_height:
                    break

                if task.uuid:
                    lineno = next(y)
                self.display_task_row(lineno, task)

        # -- Footer
        next(blank_line)
        win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4)

        # Selected Task Info
        if self.selected_task:
            win.addstr(my - 5, x, self.selected_str, curses.A_BOLD)
            info = 'Missing extended info'
            detail = ''
            try:
                selection = self.state.tasks[self.selected_task]
            except KeyError:
                pass
            else:
                info = selection.info()
                if 'runtime' in info:
                    info['runtime'] = '{0:.2f}'.format(info['runtime'])
                if 'result' in info:
                    info['result'] = abbr(info['result'], 16)
                info = ' '.join(
                    '{0}={1}'.format(key, value)
                    for key, value in items(info)
                )
                detail = '... -> key i'
            infowin = abbr(info,
                           self.screen_width - len(self.selected_str) - 2,
                           detail)
            win.addstr(my - 5, x + len(self.selected_str), infowin)
            # Make ellipsis bold
            if detail in infowin:
                detailpos = len(infowin) - len(detail)
                win.addstr(my - 5, x + len(self.selected_str) + detailpos,
                           detail, curses.A_BOLD)
        else:
            win.addstr(my - 5, x, 'No task selected', curses.A_NORMAL)

        # Workers
        if self.workers:
            win.addstr(my - 4, x, self.online_str, curses.A_BOLD)
            win.addstr(my - 4, x + len(self.online_str),
                       ', '.join(sorted(self.workers)), curses.A_NORMAL)
        else:
            win.addstr(my - 4, x, 'No workers discovered.')

        # Info
        win.addstr(my - 3, x, self.info_str, curses.A_BOLD)
        win.addstr(
            my - 3, x + len(self.info_str),
            STATUS_SCREEN.format(
                s=self.state,
                w_alive=len([w for w in values(self.state.workers)
                             if w.alive]),
                w_all=len(self.state.workers),
            ),
            curses.A_DIM,
        )

        # Help
        self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD)
        self.safe_add_str(my - 2, x + len(self.help_title), self.help,
                          curses.A_DIM)
        win.refresh()
def workers(self):
    return [hostname for hostname, w in items(self.state.workers)
            if w.alive]
def expand(S):
    ret = S
    if S is not None:
        for short_opt, long_opt in items(mapping):
            ret = ret.replace(short_opt, long_opt)
    return ret
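# Context note (hedged): expand above is the closure returned by
# abbreviations(mapping) in celery's multi tooling, so with e.g.
# mapping = {'%h': 'worker1@example.com'},
# expand('--logfile=%h.log') returns '--logfile=worker1@example.com.log'.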
def attrs(self, d, scheme=None):
    d = dict(self.scheme, **dict(scheme, **d or {}) if scheme else d)
    return self._attrsep.join(
        safe_str(self.attr(k, v)) for k, v in items(d)
    )
def _kwargs_to_command_line(kwargs):
    return {
        ('--{0}'.format(k.replace('_', '-'))
         if len(k) > 1 else '-{0}'.format(k)): '{0}'.format(v)
        for k, v in items(kwargs)
    }
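# A minimal sketch of _kwargs_to_command_line above (hypothetical values):
# multi-character keys become --long-options with underscores dashified,
# single-character keys become -short options; values are stringified.
assert _kwargs_to_command_line({'max_tasks_per_child': 30, 'n': 'worker1'}) == {
    '--max-tasks-per-child': '30',
    '-n': 'worker1',
}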
def __init__(self, **kwargs):
    for key, value in items(kwargs):
        setattr(self, key, value)
def _refresh_heap(self):
    self._heap[:] = [(t, key) for key, t in items(self._data)]
    heapify(self._heap)
def table(self, with_defaults=False, censored=True):
    filt = filter_hidden_settings if censored else lambda v: v
    return filt(
        dict((k, v) for k, v in items(
            self if with_defaults else self.without_defaults())
            if k.isupper() and not k.startswith('_')))
def Namespace(__old__=None, **options):
    if __old__ is not None:
        for key, opt in items(options):
            if not opt.old:
                opt.old = {o.format(key) for o in __old__}
    return options
from case import skip
from decimal import Decimal
from pprint import pprint

from celery.five import (
    items, long_t, python_2_unicode_compatible, text_t, values,
)
from celery.utils.saferepr import saferepr

D_NUMBERS = {
    b'integer': 1,
    b'float': 1.3,
    b'decimal': Decimal('1.3'),
    b'long': long_t(4),
    b'complex': complex(13.3),
}
D_INT_KEYS = {v: k for k, v in items(D_NUMBERS)}

QUICK_BROWN_FOX = 'The quick brown fox jumps over the lazy dog.'
B_QUICK_BROWN_FOX = b'The quick brown fox jumps over the lazy dog.'

D_TEXT = {
    b'foo': QUICK_BROWN_FOX,
    b'bar': B_QUICK_BROWN_FOX,
    b'baz': B_QUICK_BROWN_FOX,
    b'xuzzy': B_QUICK_BROWN_FOX,
}

L_NUMBERS = list(values(D_NUMBERS))

D_TEXT_LARGE = {
    b'bazxuzzyfoobarlongverylonglong': QUICK_BROWN_FOX * 30,
}
def _iterate_items(self):
    return items(self.adjacent)
def __repr__(self):
    return repr(dict(items(self)))
def utf8dict(d, encoding='utf-8'):
    return {
        k.decode(encoding) if isinstance(k, bytes) else k: v
        for k, v in items(d)
    }
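# A minimal sketch of utf8dict above (hypothetical values; assumes items()
# from celery.five): byte keys are decoded, values pass through unchanged.
assert utf8dict({b'content-type': 'text/plain', 'status': 200}) == {
    'content-type': 'text/plain',
    'status': 200,
}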
def edges(self):
    """Return generator that yields all edges in the graph."""
    return (obj for obj, adj in items(self) if adj)
def _deserialize_Task_WeakSet_Mapping(mapping, tasks):
    return {
        name: WeakSet(tasks[i] for i in ids if i in tasks)
        for name, ids in items(mapping or {})
    }
def itertasks(self, limit=None):
    for index, row in enumerate(items(self.tasks)):
        yield row
        if limit and index + 1 >= limit:
            break
def _serialize_Task_WeakSet_Mapping(mapping):
    return {name: [t.id for t in tasks]
            for name, tasks in items(mapping)}
def reset_rate_limits(self):
    self.task_buckets.update(
        (n, self.bucket_for_task(t)) for n, t in items(self.app.tasks))
def update(self, f, **kw):
    for k, v in items(dict(f, **kw) if kw else f):
        setattr(self, k, v)
def merge(self, state, timestamp, fields):
    keep = self.merge_rules.get(state)
    if keep is not None:
        fields = dict((k, v) for k, v in items(fields) if k in keep)
        for key, value in items(fields):
            setattr(self, key, value)
def __init__(self, uuid=None, **kwargs):
    self.uuid = uuid
    if kwargs:
        for k, v in items(kwargs):
            setattr(self, k, v)
def _update_ns_ranges(self, p, ranges):
    for ns_name, ns_opts in list(items(p.namespaces)):
        if ',' in ns_name or (ranges and '-' in ns_name):
            for subns in self._parse_ns_range(ns_name, ranges):
                p.namespaces[subns].update(ns_opts)
            p.namespaces.pop(ns_name)
def __init__(self, *args, **kwargs):
    attrs = kwargs.pop('attrs', None) or {}
    super(Mock, self).__init__(*args, **kwargs)
    for attr_name, attr_value in items(attrs):
        setattr(self, attr_name, attr_value)
def humanize(self, with_defaults=False, censored=True):
    """Return a human readable text showing configuration changes."""
    return '\n'.join(
        '{0}: {1}'.format(key, pretty(value, width=50))
        for key, value in items(self.table(with_defaults, censored)))
def mixin_template(template, conf):
    cls = symbol_by_name(templates[template])
    conf.update(dict((k, v) for k, v in items(vars(cls))
                     if k.isupper() and not k.startswith('_')))
def __init__(self, default=None, *args, **kwargs):
    self.default = default
    self.type = kwargs.get('type') or 'string'
    for attr, value in items(kwargs):
        setattr(self, attr, value)