def flush(self):
    if self._state == TERMINATE:
        return
    # cancel all tasks that haven't been accepted so that NACK is sent.
    for job in values(self._cache):
        if not job._accepted:
            job._cancel()

    # clear the outgoing buffer as the tasks will be redelivered by
    # the broker anyway.
    if self.outbound_buffer:
        self.outbound_buffer.clear()

    self.maintain_pool()

    try:
        # ...but we must continue writing the payloads we already started
        # to keep message boundaries.
        # The messages may be NACK'ed later if synack is enabled.
        if self._state == RUN:
            # flush outgoing buffers
            intervals = fxrange(0.01, 0.1, 0.01, repeatlast=True)

            owned_by = {}
            for job in values(self._cache):
                writer = _get_job_writer(job)
                if writer is not None:
                    owned_by[writer] = job

            while self._active_writers:
                writers = list(self._active_writers)
                for gen in writers:
                    if (gen.__name__ == "_write_job" and
                            gen_not_started(gen)):
                        # hasn't started writing the job so can
                        # discard the task, but we must also remove
                        # it from the Pool._cache.
                        try:
                            job = owned_by[gen]
                        except KeyError:
                            pass
                        else:
                            # removes from Pool._cache
                            job.discard()
                        self._active_writers.discard(gen)
                    else:
                        try:
                            job = owned_by[gen]
                        except KeyError:
                            pass
                        else:
                            job_proc = job._write_to
                            if job_proc._is_alive():
                                self._flush_writer(job_proc, gen)
                # workers may have exited in the meantime.
                self.maintain_pool()
                sleep(next(intervals))  # don't busyloop
    finally:
        self.outbound_buffer.clear()
        self._active_writers.clear()
        self._active_writes.clear()
        self._busy_workers.clear()
def test_compat_indices(self):
    assert not any(key.isupper() for key in DEFAULTS)
    assert not any(key.islower() for key in _OLD_DEFAULTS)
    assert not any(key.isupper() for key in _TO_OLD_KEY)
    assert not any(key.islower() for key in _TO_NEW_KEY)
    assert not any(key.isupper() for key in SETTING_KEYS)
    assert not any(key.islower() for key in _OLD_SETTING_KEYS)
    assert not any(value.isupper() for value in values(_TO_NEW_KEY))
    assert not any(value.islower() for value in values(_TO_OLD_KEY))

    for key in _TO_NEW_KEY:
        assert key in _OLD_SETTING_KEYS
    for key in _TO_OLD_KEY:
        assert key in SETTING_KEYS
def test_compat_indices(self):
    self.assertFalse(any(key.isupper() for key in DEFAULTS))
    self.assertFalse(any(key.islower() for key in _OLD_DEFAULTS))
    self.assertFalse(any(key.isupper() for key in _TO_OLD_KEY))
    self.assertFalse(any(key.islower() for key in _TO_NEW_KEY))
    self.assertFalse(any(key.isupper() for key in SETTING_KEYS))
    self.assertFalse(any(key.islower() for key in _OLD_SETTING_KEYS))
    self.assertFalse(any(value.isupper() for value in values(_TO_NEW_KEY)))
    self.assertFalse(any(value.islower() for value in values(_TO_OLD_KEY)))

    for key in _TO_NEW_KEY:
        self.assertIn(key, _OLD_SETTING_KEYS)
    for key in _TO_OLD_KEY:
        self.assertIn(key, SETTING_KEYS)
def filtertests(self, group, names):
    tests = self.groups[group]
    try:
        return ([tests[n] for n in names]
                if names else list(values(tests)))
    except KeyError as exc:
        raise KeyError('Unknown test name: {0}'.format(exc))
def periodic(self):
    for worker in values(self.state.workers):
        if not worker.alive:
            try:
                self.on_node_lost(worker)
            finally:
                self.state.workers.pop(worker.hostname, None)
def run(self, force_ipython=False, force_bpython=False,
        force_python=False, without_tasks=False, eventlet=False,
        gevent=False, **kwargs):
    sys.path.insert(0, os.getcwd())
    if eventlet:
        import_module('celery.concurrency.eventlet')
    if gevent:
        import_module('celery.concurrency.gevent')
    import celery
    import celery.task.base
    self.app.loader.import_default_modules()
    self.locals = {'celery': self.app,
                   'Task': celery.Task,
                   'chord': celery.chord,
                   'group': celery.group,
                   'chain': celery.chain,
                   'chunks': celery.chunks,
                   'xmap': celery.xmap,
                   'xstarmap': celery.xstarmap,
                   'subtask': celery.subtask}

    if not without_tasks:
        self.locals.update(dict(
            (task.__name__, task) for task in values(self.app.tasks)
            if not task.name.startswith('celery.')),
        )

    if force_python:
        return self.invoke_fallback_shell()
    elif force_bpython:
        return self.invoke_bpython_shell()
    elif force_ipython:
        return self.invoke_ipython_shell()
    return self.invoke_default_shell()
def find_app(self, app):
    try:
        sym = self.symbol_by_name(app)
    except AttributeError:
        # last part was not an attribute, but a module
        sym = import_from_cwd(app)
    if isinstance(sym, ModuleType):
        try:
            return sym.app
        except AttributeError:
            try:
                return sym.celery
            except AttributeError:
                if getattr(sym, '__path__', None):
                    try:
                        return self.find_app(
                            '{0}.celery:'.format(app.replace(':', '')),
                        )
                    except ImportError:
                        pass
                for suspect in values(vars(sym)):
                    if isinstance(suspect, Celery):
                        return suspect
                raise
    return sym
def _get(self):
    # If the first bucket is always returning items, we would never
    # get to fetch items from the other buckets. So we always iterate over
    # all the buckets and put any ready items into a queue called
    # "immediate". This queue is always checked for cached items first.
    try:
        return 0, self._get_immediate()
    except Empty:
        pass

    remaining_times = []
    for bucket in values(self.buckets):
        remaining = bucket.expected_time()
        if not remaining:
            try:
                # Just put any ready items into the immediate queue.
                self.immediate.append(bucket.get_nowait())
            except Empty:
                pass
            except RateLimitExceeded:
                remaining_times.append(bucket.expected_time())
        else:
            remaining_times.append(remaining)

    # Try the immediate queue again.
    try:
        return 0, self._get_immediate()
    except Empty:
        if not remaining_times:
            # No items in any of the buckets.
            raise

        # There's items, but have to wait before we can retrieve them,
        # return the shortest remaining time.
        return min(remaining_times), None
def flush(self):
    # cancel all tasks that have not been accepted so that NACK is sent.
    for job in values(self._pool._cache):
        if not job._accepted:
            job._cancel()

    # clear the outgoing buffer as the tasks will be redelivered by
    # the broker anyway.
    if self.outbound_buffer:
        self.outbound_buffer.clear()
    try:
        # ...but we must continue writing the payloads we already started
        # to keep message boundaries.
        # The messages may be NACK'ed later if synack is enabled.
        if self._pool._state == RUN:
            # flush outgoing buffers
            intervals = fxrange(0.01, 0.1, 0.01, repeatlast=True)
            while self._active_writers:
                writers = list(self._active_writers)
                for gen in writers:
                    # f_lasti == -1 means the generator has not been started.
                    if (gen.__name__ == "_write_job" and
                            gen.gi_frame and gen.gi_frame.f_lasti == -1):
                        # has not started writing the job so can
                        # safely discard
                        self._active_writers.discard(gen)
                    else:
                        try:
                            next(gen)
                        except StopIteration:
                            self._active_writers.discard(gen)
                # workers may have exited in the meantime.
                self.maintain_pool()
                sleep(next(intervals))  # don't busyloop
    finally:
        self.outbound_buffer.clear()
        self._active_writers.clear()
def on_process_up(proc):
    """Called when a process has started."""
    # If we got the same fd as a previous process then we will also
    # receive jobs in the old buffer, so we need to reset the
    # job._write_to and job._scheduled_for attributes used to recover
    # message boundaries when processes exit.
    infd = proc.inqW_fd
    for job in values(cache):
        if job._write_to and job._write_to.inqW_fd == infd:
            job._write_to = proc
        if job._scheduled_for and job._scheduled_for.inqW_fd == infd:
            job._scheduled_for = proc
    fileno_to_outq[proc.outqR_fd] = proc

    # maintain_pool is called whenever a process exits.
    add_reader(
        proc.sentinel, event_process_exit, hub, proc.sentinel,
    )

    assert not isblocking(proc.outq._reader)

    # handle_result_event is called when the processes outqueue is
    # readable.
    add_reader(proc.outqR_fd, handle_result_event, proc.outqR_fd)

    waiting_to_start.add(proc)
    hub.call_later(
        self._proc_alive_timeout, verify_process_alive, proc,
    )
def find_app(app, symbol_by_name=symbol_by_name, imp=import_from_cwd):
    """Find app by name."""
    from .base import Celery

    try:
        sym = symbol_by_name(app, imp=imp)
    except AttributeError:
        # last part was not an attribute, but a module
        sym = imp(app)
    if isinstance(sym, ModuleType) and ":" not in app:
        try:
            found = sym.app
            if isinstance(found, ModuleType):
                raise AttributeError()
        except AttributeError:
            try:
                found = sym.celery
                if isinstance(found, ModuleType):
                    raise AttributeError()
            except AttributeError:
                if getattr(sym, "__path__", None):
                    try:
                        return find_app(
                            "{0}.celery".format(app),
                            symbol_by_name=symbol_by_name, imp=imp)
                    except ImportError:
                        pass
                for suspect in values(vars(sym)):
                    if isinstance(suspect, Celery):
                        return suspect
                raise
            else:
                return found
        else:
            return found
    return sym
def flush(self):
    if self._state == TERMINATE:
        return
    # cancel all tasks that have not been accepted so that NACK is sent.
    for job in values(self._cache):
        if not job._accepted:
            job._cancel()

    # clear the outgoing buffer as the tasks will be redelivered by
    # the broker anyway.
    if self.outbound_buffer:
        self.outbound_buffer.clear()
    try:
        # ...but we must continue writing the payloads we already started
        # to keep message boundaries.
        # The messages may be NACK'ed later if synack is enabled.
        if self._state == RUN:
            # flush outgoing buffers
            intervals = fxrange(0.01, 0.1, 0.01, repeatlast=True)
            while self._active_writers:
                writers = list(self._active_writers)
                for gen in writers:
                    if (gen.__name__ == '_write_job' and
                            gen_not_started(gen)):
                        # has not started writing the job so can
                        # discard the task, but we must also remove
                        # it from the Pool._cache.
                        job_to_discard = None
                        for job in values(self._cache):
                            if job._writer() is gen:  # _writer is saferef
                                # removes from Pool._cache
                                job_to_discard = job
                                break
                        if job_to_discard:
                            job_to_discard.discard()
                        self._active_writers.discard(gen)
                    else:
                        try:
                            next(gen)
                        except StopIteration:
                            self._active_writers.discard(gen)
                # workers may have exited in the meantime.
                self.maintain_pool()
                sleep(next(intervals))  # don't busyloop
    finally:
        self.outbound_buffer.clear()
        self._active_writers.clear()
def periodic(self):
    dirty = set()
    for worker in values(self.state.workers):
        if not worker.alive:
            dirty.add(worker)
            self.on_node_lost(worker)
    for worker in dirty:
        self.state.workers.pop(worker.hostname, None)
def periodic(self):
    workers = self.state.workers
    dirty = set()
    for worker in values(workers):
        if not worker.alive:
            dirty.add(worker)
            self.on_node_lost(worker)
    for worker in dirty:
        workers.pop(worker.hostname, None)
def tick(self):
    if not self.rdb.exists(self.key):
        logger.warn("key: {} not in rdb".format(self.key))
        for e in values(self.schedule):
            self.rdb.zadd(
                self.key,
                {jsonpickle.encode(e): self._when(e, e.is_due()[1]) or 0})

    tasks = self.rdb.zrangebyscore(
        self.key, 0,
        self.adjust(mktime(self.app.now().timetuple()), drift=0.010),
        withscores=True) or []

    next_times = [self.max_interval, ]

    for task, score in tasks:
        entry = jsonpickle.decode(task)
        is_due, next_time_to_run = self.is_due(entry)

        next_times.append(next_time_to_run)

        if is_due:
            next_entry = self.reserve(entry)
            try:
                linfo("add task entry: {} to publisher".format(entry.name))
                result = self.apply_async(entry)
            except Exception as exc:
                error('Message Error: %s\n%s',
                      exc, traceback.format_stack(), exc_info=True)
            else:
                debug('%s sent. id->%s', entry.task, result.id)
            self.rdb.zrem(self.key, task)
            self.rdb.zadd(
                self.key,
                {jsonpickle.encode(next_entry):
                 self._when(next_entry, next_time_to_run) or 0})

    next_task = self.rdb.zrangebyscore(
        self.key, 0, MAXINT, withscores=True, num=1, start=0)
    if not next_task:
        linfo("no next task found")
        return min(next_times)

    entry = jsonpickle.decode(next_task[0][0])
    next_times.append(self.is_due(entry)[1])
    return min(next_times)
def setup_includes(self, includes):
    # Update celery_include to have all known task modules, so that we
    # ensure all task modules are imported in case an execv happens.
    prev = tuple(self.app.conf.CELERY_INCLUDE)
    if includes:
        prev += tuple(includes)
        [self.app.loader.import_task_module(m) for m in includes]
    self.include = includes
    task_modules = {task.__class__.__module__
                    for task in values(self.app.tasks)}
    self.app.conf.CELERY_INCLUDE = tuple(set(prev) | task_modules)
def start(self, c):
    info('mingle: searching for neighbors')
    I = c.app.control.inspect(timeout=1.0, connection=c.connection)
    replies = I.hello()
    if replies:
        for reply in values(replies):
            c.app.clock.adjust(reply['clock'])
            revoked.update(reply['revoked'])
        info('mingle: synced with %s', ', '.join(replies))
    else:
        info('mingle: no one here')
def __enter__(self):
    # The __warningregistry__'s need to be in a pristine state for tests
    # to work properly.
    warnings.resetwarnings()
    for v in list(values(sys.modules)):
        if getattr(v, '__warningregistry__', None):
            v.__warningregistry__ = {}
    self.warnings_manager = warnings.catch_warnings(record=True)
    self.warnings = self.warnings_manager.__enter__()
    warnings.simplefilter('always', self.expected)
    return self
def setup_includes(self, includes):
    # Update celery_include to have all known task modules, so that we
    # ensure all task modules are imported in case an execv happens.
    prev = tuple(self.app.conf.CELERY_INCLUDE)
    if includes:
        prev += tuple(includes)
        [self.app.loader.import_task_module(m) for m in includes]
    self.include = includes
    task_modules = set(task.__class__.__module__
                       for task in values(self.app.tasks))
    self.app.conf.CELERY_INCLUDE = tuple(set(prev) | task_modules)
def setup_includes(self, includes):
    # Update celery_include to have all known task modules, so that we
    # ensure all task modules are imported in case an execv happens.
    inc = self.app.conf.CELERY_INCLUDE
    if includes:
        includes = str_to_list(includes)
        inc = self.app.conf.CELERY_INCLUDE = tuple(inc) + tuple(includes)
    self.include = includes
    task_modules = set(task.__class__.__module__
                       for task in values(self.app.tasks))
    self.app.conf.CELERY_INCLUDE = tuple(set(inc) | task_modules)
def populate_heap(self, event_t=event_t, heapify=heapq.heapify):
    """Populate the heap with the data contained in the schedule."""
    priority = 5
    self._heap = []
    for entry in values(self.schedule):
        is_due, next_call_delay = entry.is_due()
        self._heap.append(event_t(
            self._when(entry, 0 if is_due else next_call_delay) or 0,
            priority, entry))
    heapify(self._heap)
def finalize(self):
    with self._finalize_mutex:
        if not self.finalized:
            self.finalized = True
            load_shared_tasks(self)

            pending = self._pending
            while pending:
                maybe_evaluate(pending.popleft())

            for task in values(self._tasks):
                task.bind(self)
def setup_includes(self, includes):
    # Update celery_include to have all known task modules, so that we
    # ensure all task modules are imported in case an execv happens.
    inc = self.app.conf.CELERY_INCLUDE
    if includes:
        if isinstance(includes, string_t):
            includes = includes.split(',')
        inc = self.app.conf.CELERY_INCLUDE = tuple(inc) + tuple(includes)
    self.include = includes
    task_modules = set(task.__class__.__module__
                       for task in values(self.app.tasks))
    self.app.conf.CELERY_INCLUDE = tuple(set(inc) | task_modules)
def on_close(self):
    # Clear internal queues to get rid of old messages.
    # They can't be acked anyway, as a delivery tag is specific
    # to the current channel.
    if self.controller and self.controller.semaphore:
        self.controller.semaphore.clear()
    if self.timer:
        self.timer.clear()
    for bucket in values(self.task_buckets):
        if bucket:
            bucket.clear_pending()
    reserved_requests.clear()
    if self.pool and self.pool.flush:
        self.pool.flush()
def finalize(self, auto=False): with self._finalize_mutex: if not self.finalized: if auto and not self.autofinalize: raise RuntimeError("Contract breach: app not finalized") self.finalized = True _announce_app_finalized(self) pending = self._pending while pending: maybe_evaluate(pending.popleft()) for task in values(self._tasks): task.bind(self)
def finalize(self, auto=False):
    with self._finalize_mutex:
        if not self.finalized:
            if auto and not self.autofinalize:
                raise RuntimeError('Contract breach: app not finalized')
            self.finalized = True
            load_shared_tasks(self)

            pending = self._pending
            while pending:
                maybe_evaluate(pending.popleft())

            for task in values(self._tasks):
                task.bind(self)
def on_process_up(proc):
    # If we got the same fd as a previous process then we will also
    # receive jobs in the old buffer, so we need to reset the
    # _write_to and _scheduled_for tracking values used to recover
    # message boundaries when processes exit.
    infd = proc.inqW_fd
    for job in values(pool._cache):
        if job._write_to and job._write_to.inqW_fd == infd:
            job._write_to = proc
        if job._scheduled_for and job._scheduled_for.inqW_fd == infd:
            job._scheduled_for = proc
    fileno_to_outq[proc.outqR_fd] = proc
    hub_add(proc.sentinel, maintain_pool, READ | ERR)
    hub_add(proc.outqR_fd, handle_result_event, READ | ERR)
def human_write_stats(self):
    if self.write_stats is None:
        return 'N/A'
    vals = list(values(self.write_stats))
    total = sum(vals)

    def per(v, total):
        return '{0:.2f}%'.format((float(v) / total) * 100.0 if v else 0)

    return {
        'total': total,
        'avg': per(total / len(self.write_stats) if total else 0, total),
        'all': ', '.join(per(v, total) for v in vals)
    }
def as_dict(self):
    """Whole set as serializable dictionary.

    Example:
        >>> s = LimitedSet(maxlen=200)
        >>> r = LimitedSet(maxlen=200)
        >>> for i in range(500):
        ...     s.add(i)
        ...
        >>> r.update(s.as_dict())
        >>> r == s
        True
    """
    return {key: inserted for inserted, key in values(self._data)}
def human_write_stats(self): if self.write_stats is None: return "N/A" vals = list(values(self.write_stats)) total = sum(vals) def per(v, total): return "{0:.2f}%".format((float(v) / total) * 100.0 if v else 0) return { "total": total, "avg": per(total / len(self.write_stats) if total else 0, total), "all": ", ".join(per(v, total) for v in vals), }
def setup_schedule(self):
    """
    PersistentScheduler + s/self.scheduler/self._store['entries']
    """
    try:
        self._store = self.persistence.open(self.schedule_filename,
                                            writeback=True)
    except Exception as exc:
        error('Removing corrupted schedule file %r: %r',
              self.schedule_filename, exc, exc_info=True)
        self._remove_db()
        self._store = self.persistence.open(self.schedule_filename,
                                            writeback=True)
    else:
        try:
            self._store['entries']
        except KeyError:
            # new schedule db
            self._store['entries'] = {}
        else:
            if '__version__' not in self._store:
                warning('DB Reset: Account for new __version__ field')
                self._store.clear()   # remove schedule at 2.2.2 upgrade.
            elif 'tz' not in self._store:
                warning('DB Reset: Account for new tz field')
                self._store.clear()   # remove schedule at 3.0.8 upgrade
            elif 'utc_enabled' not in self._store:
                warning('DB Reset: Account for new utc_enabled field')
                self._store.clear()   # remove schedule at 3.0.9 upgrade

    tz = self.app.conf.CELERY_TIMEZONE
    stored_tz = self._store.get('tz')
    if stored_tz is not None and stored_tz != tz:
        warning('Reset: Timezone changed from %r to %r', stored_tz, tz)
        self._store.clear()   # Timezone changed, reset db!
    utc = self.app.conf.CELERY_ENABLE_UTC
    stored_utc = self._store.get('utc_enabled')
    if stored_utc is not None and stored_utc != utc:
        choices = {True: 'enabled', False: 'disabled'}
        warning('Reset: UTC changed from %s to %s',
                choices[stored_utc], choices[utc])
        self._store.clear()   # UTC setting changed, reset db!
    entries = self._store.setdefault('entries', {})
    self.merge_inplace(self.app.conf.CELERYBEAT_SCHEDULE)
    self.install_default_entries(self._store['entries'])
    self._store.update(__version__=__version__, tz=tz, utc_enabled=utc)
    self.sync()
    debug('Current schedule:\n' + '\n'.join(
        repr(entry) for entry in values(entries)))
def iter_open_logger_fds():
    seen = set()
    loggers = (list(values(logging.Logger.manager.loggerDict)) +
               [logging.getLogger(None)])
    for l in loggers:
        try:
            for handler in l.handlers:
                try:
                    if handler not in seen:  # pragma: no cover
                        yield handler.stream
                        seen.add(handler)
                except AttributeError:
                    pass
        except AttributeError:  # PlaceHolder does not have handlers
            pass
def tick(self, min=min, **kwargs):
    if self.lock:
        logger.debug('beat: Extending lock...')
        redis(self.app).pexpire(self.lock_key,
                                int(self.lock_timeout * 1000))

    remaining_times = []
    try:
        for entry in values(self.schedule):
            next_time_to_run = self.maybe_due(entry,
                                              **self._maybe_due_kwargs)
            if next_time_to_run:
                remaining_times.append(next_time_to_run)
    except RuntimeError:
        logger.debug('beat: RuntimeError', exc_info=True)

    return min(remaining_times + [self.max_interval])
def start(self, c):
    info('mingle: searching for neighbors')
    I = c.app.control.inspect(timeout=1.0, connection=c.connection)
    replies = I.hello()
    if replies:
        for reply in values(replies):
            try:
                other_clock, other_revoked = MINGLE_GET_FIELDS(reply)
            except KeyError:  # reply from pre-3.1 worker
                pass
            else:
                c.app.clock.adjust(other_clock)
                revoked.update(other_revoked)
        info('mingle: synced with %s', ', '.join(replies))
    else:
        info('mingle: no one here')
def _finalize_steps(self, steps):
    last = self._find_last()
    self._firstpass(steps)
    it = ((C, C.requires) for C in values(steps))
    G = self.graph = DependencyGraph(
        it, formatter=self.GraphFormatter(root=last),
    )
    if last:
        for obj in G:
            if obj != last:
                G.add_edge(last, obj)
    try:
        return G.topsort()
    except KeyError as exc:
        raise KeyError('unknown bootstep: %s' % exc)
def human_write_stats(self): if self.write_stats is None: return "N/A" vals = list(values(self.write_stats)) total = sum(vals) def per(v, total): return "{0:.2f}%".format((float(v) / total) * 100.0 if v else 0) return { "total": total, "avg": per(total / len(self.write_stats) if total else 0, total), "all": ", ".join(per(v, total) for v in vals), "raw": ", ".join(map(str, vals)), "inqueues": {"total": len(self._all_inqueues), "active": len(self._active_writes)}, }
def setup_schedule(self):
    """
    PersistentScheduler + s/self.scheduler/self._store['entries']
    """
    try:
        self._store = self.persistence.open(self.schedule_filename,
                                            writeback=True)
    except Exception as exc:
        error('Removing corrupted schedule file %r: %r',
              self.schedule_filename, exc, exc_info=True)
        self._remove_db()
        self._store = self.persistence.open(self.schedule_filename,
                                            writeback=True)
    else:
        try:
            self._store['entries']
        except KeyError:
            # new schedule db
            self._store['entries'] = {}
        else:
            if '__version__' not in self._store:
                warning('DB Reset: Account for new __version__ field')
                self._store.clear()   # remove schedule at 2.2.2 upgrade.
            elif 'tz' not in self._store:
                warning('DB Reset: Account for new tz field')
                self._store.clear()   # remove schedule at 3.0.8 upgrade
            elif 'utc_enabled' not in self._store:
                warning('DB Reset: Account for new utc_enabled field')
                self._store.clear()   # remove schedule at 3.0.9 upgrade

    tz = self.app.conf.CELERY_TIMEZONE
    stored_tz = self._store.get('tz')
    if stored_tz is not None and stored_tz != tz:
        warning('Reset: Timezone changed from %r to %r', stored_tz, tz)
        self._store.clear()   # Timezone changed, reset db!
    utc = self.app.conf.CELERY_ENABLE_UTC
    stored_utc = self._store.get('utc_enabled')
    if stored_utc is not None and stored_utc != utc:
        choices = {True: 'enabled', False: 'disabled'}
        warning('Reset: UTC changed from %s to %s',
                choices[stored_utc], choices[utc])
        self._store.clear()   # UTC setting changed, reset db!
    entries = self._store.setdefault('entries', {})
    self.merge_inplace(self.app.conf.CELERYBEAT_SCHEDULE)
    self.install_default_entries(self._store['entries'])
    self._store.update(__version__=__version__, tz=tz, utc_enabled=utc)
    self.sync()
    debug('Current schedule:\n' + '\n'.join(
        repr(entry) for entry in values(entries)))
def _run(self, ipython=False, bpython=False, python=False, without_tasks=False, eventlet=False, gevent=False, **kwargs): sys.path.insert(0, os.getcwd()) if eventlet: import_module("celery.concurrency.eventlet") if gevent: import_module("celery.concurrency.gevent") import celery import celery.task.base self.app.loader.import_default_modules() # pylint: disable=attribute-defined-outside-init self.locals = { "app": self.app, "celery": self.app, "Task": celery.Task, "chord": celery.chord, "group": celery.group, "chain": celery.chain, "chunks": celery.chunks, "xmap": celery.xmap, "xstarmap": celery.xstarmap, "subtask": celery.subtask, "signature": celery.signature, } if not without_tasks: self.locals.update({ task.__name__: task for task in values(self.app.tasks) if not task.name.startswith("celery.") }) if python: return self.invoke_fallback_shell() elif bpython: return self.invoke_bpython_shell() elif ipython: return self.invoke_ipython_shell() return self.invoke_default_shell()
def init_groups(self):
    acc = defaultdict(list)
    for attr in dir(self):
        if not _is_descriptor(self, attr):
            meth = getattr(self, attr)
            try:
                groups = meth.__func__.__testgroup__
            except AttributeError:
                pass
            else:
                for g in groups:
                    acc[g].append(meth)

    # sort the tests by the order in which they are defined in the class
    for g in values(acc):
        g[:] = sorted(g, key=lambda m: m.__func__.__testsort__)

    self.groups = dict(
        (name, testgroup(*tests)) for name, tests in items(acc))
def _run(self, ipython=False, bpython=False, python=False,
         without_tasks=False, eventlet=False, gevent=False, **kwargs):
    sys.path.insert(0, os.getcwd())
    if eventlet:
        import_module('celery.concurrency.eventlet')
    if gevent:
        import_module('celery.concurrency.gevent')
    import celery
    import celery.task.base
    self.app.loader.import_default_modules()

    # pylint: disable=attribute-defined-outside-init
    self.locals = {
        'app': self.app,
        'celery': self.app,
        'Task': celery.Task,
        'chord': celery.chord,
        'group': celery.group,
        'chain': celery.chain,
        'chunks': celery.chunks,
        'xmap': celery.xmap,
        'xstarmap': celery.xstarmap,
        'subtask': celery.subtask,
        'signature': celery.signature,
    }

    if not without_tasks:
        self.locals.update({
            task.__name__: task for task in values(self.app.tasks)
            if not task.name.startswith('celery.')
        })

    if python:
        return self.invoke_fallback_shell()
    elif bpython:
        return self.invoke_bpython_shell()
    elif ipython:
        return self.invoke_ipython_shell()
    return self.invoke_default_shell()
def human_write_stats(self): if self.write_stats is None: return "N/A" vals = list(values(self.write_stats)) total = sum(vals) def per(v, total): return "{0:.2%}".format((float(v) / total) if v else 0) return { "total": total, "avg": per(total / len(self.write_stats) if total else 0, total), "all": ", ".join(per(v, total) for v in vals), "raw": ", ".join(map(str, vals)), "strategy": SCHED_STRATEGY_TO_NAME.get(self.sched_strategy, self.sched_strategy), "inqueues": {"total": len(self._all_inqueues), "active": len(self._active_writes)}, }
def run(
    self,
    force_ipython=False,
    force_bpython=False,
    force_python=False,
    without_tasks=False,
    eventlet=False,
    gevent=False,
    **kwargs
):
    sys.path.insert(0, os.getcwd())
    if eventlet:
        import_module("celery.concurrency.eventlet")
    if gevent:
        import_module("celery.concurrency.gevent")
    import celery
    import celery.task.base

    self.app.loader.import_default_modules()
    self.locals = {
        "app": self.app,
        "celery": self.app,
        "Task": celery.Task,
        "chord": celery.chord,
        "group": celery.group,
        "chain": celery.chain,
        "chunks": celery.chunks,
        "xmap": celery.xmap,
        "xstarmap": celery.xstarmap,
        "subtask": celery.subtask,
        "signature": celery.signature,
    }

    if not without_tasks:
        self.locals.update(
            {task.__name__: task
             for task in values(self.app.tasks)
             if not task.name.startswith("celery.")}
        )

    if force_python:
        return self.invoke_fallback_shell()
    elif force_bpython:
        return self.invoke_bpython_shell()
    elif force_ipython:
        return self.invoke_ipython_shell()
    return self.invoke_default_shell()
def finalize(self, auto=False):
    """Finalizes the app by loading built-in tasks,
    and evaluating pending task decorators."""
    with self._finalize_mutex:
        if not self.finalized:
            if auto and not self.autofinalize:
                raise RuntimeError('Contract breach: app not finalized')
            self.finalized = True
            _announce_app_finalized(self)

            pending = self._pending
            while pending:
                maybe_evaluate(pending.popleft())

            for task in values(self._tasks):
                task.bind(self)

            self.on_after_finalize.send(sender=self)
def on_process_up(proc):
    """Called when a WORKER_UP message is received from process."""
    # If we got the same fd as a previous process then we will also
    # receive jobs in the old buffer, so we need to reset the
    # job._write_to and job._scheduled_for attributes used to recover
    # message boundaries when processes exit.
    infd = proc.inqW_fd
    for job in values(cache):
        if job._write_to and job._write_to.inqW_fd == infd:
            job._write_to = proc
        if job._scheduled_for and job._scheduled_for.inqW_fd == infd:
            job._scheduled_for = proc
    fileno_to_outq[proc.outqR_fd] = proc
    # maintain_pool is called whenever a process exits.
    add_reader(proc.sentinel, maintain_pool)
    # handle_result_event is called when the processes outqueue is
    # readable.
    add_reader(proc.outqR_fd, handle_result_event, proc.outqR_fd)
def human_write_stats(self):
    if self.write_stats is None:
        return 'N/A'
    vals = list(values(self.write_stats))
    total = sum(vals)

    def per(v, total):
        return '{0:.2f}%'.format((float(v) / total) * 100.0 if v else 0)

    return {
        'total': total,
        'avg': per(total / len(self.write_stats) if total else 0, total),
        'all': ', '.join(per(v, total) for v in vals),
        'raw': ', '.join(map(str, vals)),
        'inqueues': {
            'total': len(self._all_inqueues),
            'active': len(self._active_writes),
        }
    }
def start(self, c):
    info('mingle: searching for neighbors')
    I = c.app.control.inspect(timeout=1.0, connection=c.connection)
    replies = I.hello(c.hostname, revoked._data) or {}
    replies.pop(c.hostname, None)
    if replies:
        info('mingle: hello %s! sync with me',
             ', '.join(reply for reply, value in items(replies) if value))
        for reply in values(replies):
            if reply:
                try:
                    other_clock, other_revoked = MINGLE_GET_FIELDS(reply)
                except KeyError:  # reply from pre-3.1 worker
                    pass
                else:
                    c.app.clock.adjust(other_clock)
                    revoked.update(other_revoked)
    else:
        info('mingle: all alone')
def find_app(self, app):
    try:
        sym = self.symbol_by_name(app)
    except AttributeError:
        # last part was not an attribute, but a module
        sym = import_from_cwd(app)
    if isinstance(sym, ModuleType):
        try:
            return sym.celery
        except AttributeError:
            if getattr(sym, '__path__', None):
                return self.find_app('{0}.celery:'.format(
                    app.replace(':', '')))
            from celery.app.base import Celery
            for suspect in values(vars(sym)):
                if isinstance(suspect, Celery):
                    return suspect
            raise
    return sym
def schedule(self):
    update = False
    if not self._initial_read:
        debug('DatabaseScheduler: initial read')
        update = True
        self._initial_read = True
    elif self.schedule_changed():
        info('DatabaseScheduler: Schedule changed.')
        update = True

    if update:
        self.sync()
        self._schedule = self.all_as_schedule()
        if logger.isEnabledFor(logging.DEBUG):
            debug(
                'Current schedule:\n%s',
                '\n'.join(repr(entry) for entry in values(self._schedule)),
            )
    return self._schedule
def run(self, force_ipython=False, force_bpython=False,
        force_python=False, without_tasks=False, eventlet=False,
        gevent=False, **kwargs):
    sys.path.insert(0, os.getcwd())
    if eventlet:
        import_module('celery.concurrency.eventlet')
    if gevent:
        import_module('celery.concurrency.gevent')
    import celery
    import celery.task.base
    self.app.loader.import_default_modules()
    self.locals = {
        'app': self.app,
        'celery': self.app,
        'Task': celery.Task,
        'chord': celery.chord,
        'group': celery.group,
        'chain': celery.chain,
        'chunks': celery.chunks,
        'xmap': celery.xmap,
        'xstarmap': celery.xstarmap,
        'subtask': celery.subtask,
        'signature': celery.signature
    }

    if not without_tasks:
        self.locals.update(
            dict((task.__name__, task)
                 for task in values(self.app.tasks)
                 if not task.name.startswith('celery.')),
        )

    if force_python:
        return self.invoke_fallback_shell()
    elif force_bpython:
        return self.invoke_bpython_shell()
    elif force_ipython:
        return self.invoke_ipython_shell()
    return self.invoke_default_shell()
def human_write_stats(self):
    if self.write_stats is None:
        return 'N/A'
    vals = list(values(self.write_stats))
    total = sum(vals)

    def per(v, total):
        return '{0:.2%}'.format((float(v) / total) if v else 0)

    return {
        'total': total,
        'avg': per(total / len(self.write_stats) if total else 0, total),
        'all': ', '.join(per(v, total) for v in vals),
        'raw': ', '.join(map(str, vals)),
        'strategy': SCHED_STRATEGY_TO_NAME[self.sched_strategy],
        'inqueues': {
            'total': len(self._all_inqueues),
            'active': len(self._active_writes),
        }
    }
def __enter__(self):
    # The __warningregistry__'s need to be in a pristine state for tests
    # to work properly.
    warnings.resetwarnings()
    for v in list(values(sys.modules)):
        # do not evaluate Django moved modules and other lazily
        # initialized modules.
        if v and not _is_magic_module(v):
            # use raw __getattribute__ to protect even better from
            # lazily loaded modules
            try:
                object.__getattribute__(v, '__warningregistry__')
            except AttributeError:
                pass
            else:
                object.__setattr__(v, '__warningregistry__', {})
    self.warnings_manager = warnings.catch_warnings(record=True)
    self.warnings = self.warnings_manager.__enter__()
    warnings.simplefilter('always', self.expected)
    return self