def assertSafeIter(self, method, interval=0.01, size=10000):
    """Assert iterating ``method`` on an LRUCache is safe while another
    thread concurrently evicts entries.

    :param method: name of the cache iterator method to exercise.
    :param interval: unused; kept for interface compatibility.
    :param size: number of entries to preload into the cache.
    """
    from threading import Thread, Event
    from time import sleep
    x = LRUCache(size)
    x.update(zip(range(size), range(size)))

    class Burglar(Thread):
        def __init__(self, cache):
            self.cache = cache
            self.__is_shutdown = Event()
            self.__is_stopped = Event()
            Thread.__init__(self)

        def run(self):
            # Evict oldest entries until told to shut down or cache empty.
            # NOTE: is_set() replaces the deprecated isSet() alias.
            while not self.__is_shutdown.is_set():
                try:
                    self.cache.data.popitem(last=False)
                except KeyError:
                    break
            self.__is_stopped.set()

        def stop(self):
            self.__is_shutdown.set()
            self.__is_stopped.wait()
            self.join(THREAD_TIMEOUT_MAX)

    burglar = Burglar(x)
    burglar.start()
    try:
        # Iterate slowly so the burglar gets many chances to interleave.
        for _ in getattr(x, method)():
            sleep(0.0001)
    finally:
        burglar.stop()
def test_default_crontab_spec(self):
    """A bare crontab() fires every minute of every day of every month."""
    spec = crontab()
    expectations = [
        ('minute', set(range(60))),
        ('hour', set(range(24))),
        ('day_of_week', set(range(7))),
        ('day_of_month', set(range(1, 32))),
        ('month_of_year', set(range(1, 13))),
    ]
    for attr, expected in expectations:
        self.assertEqual(getattr(spec, attr), expected)
def test_group_to_chord(self):
    """A group in the middle of a chain is upgraded to a chord whose
    body carries the rest of the chain via ``link`` callbacks."""
    c = (
        self.add.s(5) |
        group([self.add.s(i, i) for i in range(5)], app=self.app) |
        self.add.s(10) |
        self.add.s(20) |
        self.add.s(30)
    )
    c._use_link = True
    # prepare_steps returns tasks in reverse order: tasks[-1] is the head.
    tasks, results = c.prepare_steps((), c.tasks)
    self.assertEqual(tasks[-1].args[0], 5)
    # the group became a chord with the 5 header tasks
    self.assertIsInstance(tasks[-2], chord)
    self.assertEqual(len(tasks[-2].tasks), 5)
    # head task is both parent and root of the chord
    self.assertEqual(tasks[-2].parent_id, tasks[-1].id)
    self.assertEqual(tasks[-2].root_id, tasks[-1].id)
    # the task following the group becomes the chord body
    self.assertEqual(tasks[-2].body.args[0], 10)
    self.assertEqual(tasks[-2].body.parent_id, tasks[-2].id)
    self.assertEqual(tasks[-3].args[0], 20)
    self.assertEqual(tasks[-3].root_id, tasks[-1].id)
    self.assertEqual(tasks[-3].parent_id, tasks[-2].body.id)
    self.assertEqual(tasks[-4].args[0], 30)
    self.assertEqual(tasks[-4].parent_id, tasks[-3].id)
    self.assertEqual(tasks[-4].root_id, tasks[-1].id)
    # remaining chain tasks are connected as link callbacks on the body
    self.assertTrue(tasks[-2].body.options['link'])
    self.assertTrue(tasks[-2].body.options['link'][0].options['link'])

    # a trailing group (nothing after it) stays a plain group
    c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10))
    c2._use_link = True
    tasks2, _ = c2.prepare_steps((), c2.tasks)
    self.assertIsInstance(tasks2[0], group)
def test_join_native(self):
    """join_native() returns results in the original subtask order."""
    backend = SimpleBackend()
    members = [self.app.AsyncResult(uuid(), backend=backend)
               for _ in range(10)]
    ts = self.app.GroupResult(uuid(), members)
    backend.ids = [member.id for member in members]
    self.assertEqual(ts.join_native(), list(range(10)))
def test_simple_crontab_spec(self):
    """minute=30 pins the minute; every other field stays a wildcard."""
    spec = crontab(minute=30)
    self.assertEqual(spec.minute, {30})
    self.assertEqual(spec.hour, set(range(24)))
    self.assertEqual(spec.day_of_week, set(range(7)))
    self.assertEqual(spec.day_of_month, set(range(1, 32)))
    self.assertEqual(spec.month_of_year, set(range(1, 13)))
def test_forward_options(self):
    """apply_async forwards group_id/chord options onto the chord body."""
    body = xsum.s()

    first = chord([add.s(i, i) for i in range(10)], body=body)
    first.apply_async(group_id='some_group_id')
    self.assertEqual(body.options['group_id'], 'some_group_id')

    second = chord([add.s(i, i) for i in range(10)], body=body)
    second.apply_async(chord='some_chord_id')
    self.assertEqual(body.options['chord'], 'some_chord_id')
def test_parse_star(self):
    """'*' expands to the parser's full range, honouring min/max bounds."""
    cases = [
        (crontab_parser(24), set(range(24))),
        (crontab_parser(60), set(range(60))),
        (crontab_parser(7), set(range(7))),
        (crontab_parser(31, 1), set(range(1, 32))),
        (crontab_parser(12, 1), set(range(1, 13))),
    ]
    for parser, expected in cases:
        self.assertEqual(parser.parse('*'), expected)
def _revoketerm(self, wait=None, terminate=True, joindelay=True, data=BIG):
    """Run a group of payload tasks, optionally terminate-revoke, then join."""
    tasks = group(any_.s(data, sleep=wait) for _ in range(8))
    result = tasks()
    if terminate:
        if joindelay:
            # randomize how long the tasks run before being revoked
            sleep(random.choice(range(4)))
        result.revoke(terminate=True)
    self.join(result, timeout=100)
def test_fill_rate(self):
    """With fill_rate=10, draining 20 tokens must take well over 1s."""
    bucket = buckets.TokenBucketQueue(fill_rate=10)
    # 20 items should take at least one second to complete
    time_start = time.time()
    # plain loop instead of a side-effect list comprehension
    for i in range(20):
        bucket.put(str(i))
    for i in range(20):
        sys.stderr.write('.')
        bucket.wait()
    self.assertGreater(time.time() - time_start, 1.5)
def __init__(self, *args, **kwargs):
    """Stub pool: canned handlers plus `processes` fake worker entries."""
    self.started = True
    self._timeout_handler = Mock()
    self._result_handler = Mock()
    self.maintain_pool = Mock()
    self._state = mp.RUN
    self._processes = kwargs.get("processes")
    self._pool = [
        Object(pid=pid, inqW_fd=1, outqR_fd=2)
        for pid in range(self._processes)
    ]
    self._current_proc = cycle(range(self._processes))
def test_parse_range(self):
    """'a-b' ranges include both endpoints."""
    self.assertEqual(crontab_parser(60).parse('1-10'), set(range(1, 11)))
    self.assertEqual(crontab_parser(24).parse('0-20'), set(range(21)))
    self.assertEqual(crontab_parser().parse('2-10'), set(range(2, 11)))
    self.assertEqual(crontab_parser(60, 1).parse('1-10'), set(range(1, 11)))
def test_group_to_chord(self):
    """A leading group followed by more tasks collapses into a chord."""
    chain1 = (group(self.add.s(i, i) for i in range(5)) |
              self.add.s(10) | self.add.s(20) | self.add.s(30))
    steps, _ = chain1.type.prepare_steps((), chain1.tasks)
    self.assertIsInstance(steps[0], chord)
    self.assertTrue(steps[0].body.options["link"])
    self.assertTrue(steps[0].body.options["link"][0].options["link"])

    # a trailing group stays a group
    chain2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10))
    steps2, _ = chain2.type.prepare_steps((), chain2.tasks)
    self.assertIsInstance(steps2[1], group)
def test_run(self):
    """The chord task accepts both a group header and a plain list header."""
    self.app.backend = Mock()
    self.app.backend.cleanup = Mock()
    self.app.backend.cleanup.__name__ = 'cleanup'
    chord_task = self.app.tasks['celery.chord']
    callback = self.add.signature()
    header_group = group(self.add.signature((i, i)) for i in range(5))
    chord_task(header_group, callback)
    header_list = [self.add.signature((j, j)) for j in range(5)]
    chord_task(header_list, callback)
    self.assertEqual(self.app.backend.apply_chord.call_count, 2)
def test_join_native(self):
    """join_native returns ordered results and supports a callback mode."""
    backend = SimpleBackend()
    members = [self.app.AsyncResult(uuid(), backend=backend)
               for _ in range(10)]
    ts = self.app.GroupResult(uuid(), members)
    ts.app.backend = backend
    backend.ids = [member.id for member in members]
    self.assertEqual(ts.join_native(), list(range(10)))
    # callback mode returns nothing; results go to the callback instead
    callback = Mock(name="callback")
    self.assertFalse(ts.join_native(callback=callback))
    expected = [call(r.id, i) for i, r in enumerate(ts.results)]
    callback.assert_has_calls(expected)
def test_run(self):
    """on_chord_apply fires for both TaskSet and list headers."""
    prev, current_app.backend = current_app.backend, Mock()
    current_app.backend.cleanup = Mock()
    current_app.backend.cleanup.__name__ = 'cleanup'
    try:
        chord_task = current_app.tasks['celery.chord']
        callback = dict()
        chord_task(TaskSet(add.subtask((i, i)) for i in range(5)), callback)
        chord_task([add.subtask((i, i)) for i in range(5)], callback)
        self.assertEqual(current_app.backend.on_chord_apply.call_count, 2)
    finally:
        # always restore the real backend
        current_app.backend = prev
def test_parse_steps(self):
    """'*/n' selects every n-th value starting from the parser minimum."""
    self.assertEqual(crontab_parser(8).parse('*/2'), {0, 2, 4, 6})
    self.assertEqual(crontab_parser().parse('*/2'),
                     {i * 2 for i in range(30)})
    self.assertEqual(crontab_parser().parse('*/3'),
                     {i * 3 for i in range(20)})
    self.assertEqual(crontab_parser(8, 1).parse('*/2'), {1, 3, 5, 7})
    self.assertEqual(crontab_parser(min_=1).parse('*/2'),
                     {i * 2 + 1 for i in range(30)})
    self.assertEqual(crontab_parser(min_=1).parse('*/3'),
                     {i * 3 + 1 for i in range(20)})
def test_run(self):
    """on_chord_apply fires for both group and list headers."""
    prev, self.app.backend = self.app.backend, Mock()
    self.app.backend.cleanup = Mock()
    self.app.backend.cleanup.__name__ = 'cleanup'
    try:
        chord_task = self.app.tasks['celery.chord']
        callback = dict()
        chord_task(group(self.add.subtask((i, i)) for i in range(5)),
                   callback)
        chord_task([self.add.subtask((j, j)) for j in range(5)], callback)
        self.assertEqual(self.app.backend.on_chord_apply.call_count, 2)
    finally:
        # always restore the real backend
        self.app.backend = prev
def test_forward_options(self):
    """apply_async forwards group_id/chord into the body given to run()."""
    body = self.xsum.s()

    first = chord([self.add.s(i, i) for i in range(10)], body=body)
    first.run = Mock(name='chord.run(x)')
    first.apply_async(group_id='some_group_id')
    first.run.assert_called()
    forwarded = first.run.call_args[0][1]
    assert forwarded.options['group_id'] == 'some_group_id'

    second = chord([self.add.s(i, i) for i in range(10)], body=body)
    second.run = Mock(name='chord.run(x2)')
    second.apply_async(chord='some_chord_id')
    second.run.assert_called()
    forwarded = second.run.call_args[0][1]
    assert forwarded.options['chord'] == 'some_chord_id'
def test_forward_options(self):
    """apply_async forwards group_id/chord into the body given to run()."""
    body = self.xsum.s()

    first = chord([self.add.s(i, i) for i in range(10)], body=body)
    first.run = Mock(name="chord.run(x)")
    first.apply_async(group_id="some_group_id")
    first.run.assert_called()
    forwarded = first.run.call_args[0][1]
    self.assertEqual(forwarded.options["group_id"], "some_group_id")

    second = chord([self.add.s(i, i) for i in range(10)], body=body)
    second.run = Mock(name="chord.run(x2)")
    second.apply_async(chord="some_chord_id")
    second.run.assert_called()
    forwarded = second.run.call_args[0][1]
    self.assertEqual(forwarded.options["chord"], "some_chord_id")
def test_group_to_chord__protocol_2(self):
    """Without link (protocol 2) a leading group still becomes a chord."""
    chain1 = (
        group([self.add.s(i, i) for i in range(5)], app=self.app) |
        self.add.s(10) |
        self.add.s(20) |
        self.add.s(30)
    )
    chain1._use_link = False
    steps, _ = chain1.prepare_steps((), chain1.tasks)
    self.assertIsInstance(steps[-1], chord)

    # a trailing group stays a group
    chain2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10))
    chain2._use_link = False
    steps2, _ = chain2.prepare_steps((), chain2.tasks)
    self.assertIsInstance(steps2[0], group)
def test_first(self):
    """first() short-circuits on a match and exhausts on no match."""
    calls = [0]

    def pred(value):
        calls[0] += 1
        return value == 5

    self.assertEqual(first(pred, range(10)), 5)
    # stopped right after the match: values 0..5 inspected
    self.assertEqual(calls[0], 6)
    calls[0] = 0
    self.assertIsNone(first(pred, range(10, 20)))
    # no match: every value inspected
    self.assertEqual(calls[0], 10)
def test_first():
    """first() short-circuits on a match and exhausts on no match."""
    calls = [0]

    def pred(value):
        calls[0] += 1
        return value == 5

    assert first(pred, range(10)) == 5
    # stopped right after the match: values 0..5 inspected
    assert calls[0] == 6
    calls[0] = 0
    assert first(pred, range(10, 20)) is None
    # no match: every value inspected
    assert calls[0] == 10
def test_GroupResult_as_tuple(self):
    """as_tuple() serializes the group id, parent id and child results."""
    parent = self.app.AsyncResult(uuid())
    children = [self.app.AsyncResult('async-result-{}'.format(i))
                for i in range(2)]
    result = self.app.GroupResult('group-result-1', children, parent)
    (result_id, parent_id), group_results = result.as_tuple()
    assert result_id == result.id
    assert parent_id == parent.id
    assert isinstance(group_results, list)
    expected = [(('async-result-{}'.format(i), None), None)
                for i in range(2)]
    assert group_results == expected
def _argsfromspec(spec, replace_defaults=True): if spec.defaults: split = len(spec.defaults) defaults = (list(range(len(spec.defaults))) if replace_defaults else spec.defaults) positional = spec.args[:-split] optional = list(zip(spec.args[-split:], defaults)) else: positional, optional = spec.args, [] varargs = spec.varargs varkw = spec.varkw if spec.kwonlydefaults: split = len(spec.kwonlydefaults) kwonlyargs = spec.kwonlyargs[:-split] if replace_defaults: kwonlyargs_optional = [ (kw, i) for i, kw in enumerate(spec.kwonlyargs[-split:])] else: kwonlyargs_optional = list(spec.kwonlydefaults.items()) else: kwonlyargs, kwonlyargs_optional = spec.kwonlyargs, [] return ', '.join(filter(None, [ ', '.join(positional), ', '.join('{0}={1}'.format(k, v) for k, v in optional), '*{0}'.format(varargs) if varargs else None, '*' if (kwonlyargs or kwonlyargs_optional) and not varargs else None, ', '.join(kwonlyargs) if kwonlyargs else None, ', '.join('{0}="{1}"'.format(k, v) for k, v in kwonlyargs_optional), '**{0}'.format(varkw) if varkw else None, ]))
def disconnect(self, receiver=None, sender=None, weak=True,
               dispatch_uid=None):
    """Disconnect receiver from sender for signal.

    If weak references are used, disconnect need not be called.
    The receiver will be removed from dispatch automatically.

    :keyword receiver: The registered receiver to disconnect.
        May be none if `dispatch_uid` is specified.
    :keyword sender: The registered sender to disconnect.
    :keyword weak: The weakref state to disconnect.
    :keyword dispatch_uid: the unique identifier of the receiver
        to disconnect

    """
    if dispatch_uid:
        lookup_key = (dispatch_uid, _make_id(sender))
    else:
        lookup_key = (_make_id(receiver), _make_id(sender))
    # remove only the first matching registration
    for index, (r_key, _) in enumerate(self.receivers):
        if r_key == lookup_key:
            del self.receivers[index]
            break
def test_children(self):
    """Cached children are returned without consulting the backend."""
    result = self.app.AsyncResult('1')
    children = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)]
    result._cache = {'children': children, 'status': states.SUCCESS}
    result.backend = Mock()
    assert result.children
    assert len(result.children) == 3
def test_eager(self):
    """An eager chord runs the body over the header results immediately."""
    from celery import chord

    @self.app.task(shared=False)
    def addX(x, y):
        return x + y

    @self.app.task(shared=False)
    def sumX(n):
        return sum(n)

    self.app.conf.task_always_eager = True
    header = chord(addX.s(i, i) for i in range(10))
    result = header(sumX.s())
    self.assertEqual(result.get(), sum(i + i for i in range(10)))
def test_get_many(self):
    """get_many yields each stored result once, with and without on_message."""
    for is_dict in (True, False):
        self.b.mget_returns_dict = is_dict
        ids = {uuid(): i for i in range(10)}
        for task_id, value in ids.items():
            self.b.mark_as_done(task_id, value)

        gathered = self.b.get_many(list(ids), interval=0.01)
        for idx, (got_id, got_state) in enumerate(gathered):
            assert got_state['result'] == ids[got_id]
        assert idx == 9
        # results are cached, so a second pass still yields them
        assert list(self.b.get_many(list(ids), interval=0.01))

        self.b._cache.clear()
        callback = Mock(name='callback')
        gathered = self.b.get_many(
            list(ids), on_message=callback, interval=0.05
        )
        for idx, (got_id, got_state) in enumerate(gathered):
            assert got_state['result'] == ids[got_id]
        assert idx == 9
        assert list(
            self.b.get_many(list(ids), interval=0.01)
        )
        callback.assert_has_calls([call(ANY) for _ in ids])
def test_start__stop(self):
    """start/stop drive every step; None steps and a missing pool are safe."""
    worker = self.worker
    worker.blueprint.shutdown_complete.set()
    worker.steps = [MockStep(StartStopStep(self)) for _ in range(4)]
    worker.blueprint.state = RUN
    worker.blueprint.started = 4
    for step in worker.steps:
        step.start = Mock()
        step.close = Mock()
        step.stop = Mock()

    worker.start()
    for step in worker.steps:
        self.assertTrue(step.start.call_count)

    worker.consumer = Mock()
    worker.stop()
    for step in worker.steps:
        self.assertTrue(step.close.call_count)
        self.assertTrue(step.stop.call_count)

    # Doesn't close pool if no pool.
    worker.start()
    worker.pool = None
    worker.stop()

    # test that stop of None is not attempted
    worker.steps[-1] = None
    worker.start()
    worker.stop()
def test_mlazy():
    """mlazy memoizes: the wrapped callable is evaluated exactly once."""
    source = iter(range(20, 30))
    lazy_value = mlazy(nextfun(source))
    assert lazy_value() == 20
    assert lazy_value.evaluated
    # a second call returns the cached value, not the next item
    assert lazy_value() == 20
    assert repr(lazy_value) == '20'
def test_get_many(self):
    """get_many returns (and caches) metadata; a RETRY state times out."""
    b = self.create_backend()
    tids = [uuid() for _ in range(10)]
    for i, tid in enumerate(tids):
        b.store_result(tid, i, states.SUCCESS)

    res = list(b.get_many(tids, timeout=1))
    expected_results = [
        (tid, {'status': states.SUCCESS,
               'result': i,
               'traceback': None,
               'task_id': tid,
               'children': None})
        for i, tid in enumerate(tids)
    ]
    self.assertEqual(sorted(res), sorted(expected_results))
    self.assertDictEqual(b._cache[res[0][0]], res[0][1])

    # second call is served from the cache
    cached_res = list(b.get_many(tids, timeout=1))
    self.assertEqual(sorted(cached_res), sorted(expected_results))

    # a non-ready state forces waiting, which hits the timeout
    b._cache[res[0][0]]['status'] = states.RETRY
    with self.assertRaises(socket.timeout):
        list(b.get_many(tids, timeout=0.01))
def run(self, names=None, iterations=50, offset=0,
        numtests=None, list_all=False, repeat=0, **kw):
    """Run the selected stress tests, optionally repeating the suite.

    `repeat=inf` repeats forever; otherwise the suite runs int(repeat)
    times.
    """
    selected = self.filtertests(names)[offset:numtests or None]
    if list_all:
        return print(self.testlist(selected))
    print(self.banner(selected))
    if repeat == float('Inf'):
        repetitions = count()
    else:
        repetitions = range(int(repeat))
    for rep in repetitions:
        marker(
            'Stresstest suite start (repetition {0})'.format(rep + 1),
            '+',
        )
        for position, test in enumerate(selected):
            self.runtest(test, iterations, position + 1, rep + 1)
        marker(
            'Stresstest suite end (repetition {0})'.format(rep + 1),
            '+',
        )
def get_task_meta(self, task_id, backlog_limit=1000):
    """Fetch the most recent state message for `task_id` from its queue.

    Spools through up to `backlog_limit` backlogged messages, acking
    (deleting) all but the newest state, and caches the decoded result.

    :raises BacklogLimitExceeded: if the backlog exceeds `backlog_limit`.
    """
    # Polling and using basic_get
    with self.app.pool.acquire_channel(block=True) as (_, channel):
        binding = self._create_binding(task_id)(channel)
        binding.declare()

        prev = latest = acc = None
        for i in range(backlog_limit):  # spool ffwd
            acc = binding.get(
                accept=self.accept, no_ack=False,
            )
            if not acc:  # no more messages
                break
            if acc.payload['task_id'] == task_id:
                prev, latest = latest, acc
            if prev:
                # backends are not expected to keep history,
                # so we delete everything except the most recent state.
                prev.ack()
                prev = None
        else:
            # for-else: loop exhausted without `break`, so the queue
            # still had messages after backlog_limit reads.
            raise self.BacklogLimitExceeded(task_id)

        if latest:
            payload = self._cache[task_id] = self.meta_from_decoded(
                latest.payload)
            # put the newest state back so it stays retrievable
            latest.requeue()
            return payload
        else:
            # no new state, use previous
            try:
                return self._cache[task_id]
            except KeyError:
                # result probably pending.
                return {'status': states.PENDING, 'result': None}
def _argsfromspec(spec, replace_defaults=True): if spec.defaults: split = len(spec.defaults) defaults = (list(range(len(spec.defaults))) if replace_defaults else spec.defaults) positional = spec.args[:-split] optional = list(zip(spec.args[-split:], defaults)) else: positional, optional = spec.args, [] varargs = spec.varargs varkw = spec.varkw if spec.kwonlydefaults: split = len(spec.kwonlydefaults) kwonlyargs = spec.kwonlyargs[:-split] if replace_defaults: kwonlyargs_optional = [ (kw, i) for i, kw in enumerate(spec.kwonlyargs[-split:]) ] else: kwonlyargs_optional = list(spec.kwonlydefaults.items()) else: kwonlyargs, kwonlyargs_optional = spec.kwonlyargs, [] return ', '.join( filter(None, [ ', '.join(positional), ', '.join('{0}={1}'.format(k, v) for k, v in optional), '*{0}'.format(varargs) if varargs else None, '*' if (kwonlyargs or kwonlyargs_optional) and not varargs else None, ', '.join(kwonlyargs) if kwonlyargs else None, ', '.join('{0}="{1}"'.format(k, v) for k, v in kwonlyargs_optional), '**{0}'.format(varkw) if varkw else None, ]))
def join_native(self, timeout=None, propagate=True,
                interval=0.5, callback=None, no_ack=True,
                on_message=None, on_interval=None,
                disable_sync_subtasks=True):
    """Backend optimized version of :meth:`join`.

    .. versionadded:: 2.2

    Note that this does not support collecting the results
    for different task types using different backends.

    This is currently only supported by the amqp, Redis and cache
    result backends.
    """
    if disable_sync_subtasks:
        assert_will_not_block()
    if callback:
        # callback mode: results are delivered to the callback,
        # so no ordering map or accumulator is needed.
        positions = None
        collected = None
    else:
        positions = {
            result.id: i for i, result in enumerate(self.results)
        }
        collected = [None] * len(self)
    for task_id, meta in self.iter_native(timeout, interval, no_ack,
                                          on_message, on_interval):
        value = meta['result']
        if propagate and meta['status'] in states.PROPAGATE_STATES:
            raise value
        if callback:
            callback(task_id, value)
        else:
            # place each result at its original submission position
            collected[positions[task_id]] = value
    return collected
def test_apply_eager(self):
    """An always-eager chord resolves immediately to the summed result."""
    self.app.conf.CELERY_ALWAYS_EAGER = True
    result = chord(
        [self.add.s(i, i) for i in range(10)], body=self.xsum.s(),
    ).apply_async()
    # sum(i + i for i in range(10)) == 90
    self.assertEqual(result.get(), 90)
def test_update_larger_than_cache_size(self):
    """Updating with more items than capacity keeps only the newest keys."""
    cache = LRUCache(2)
    # use a distinct loop variable: the original comprehension shadowed
    # the cache variable `x` with the loop variable `x`.
    cache.update({i: i for i in range(100)})
    self.assertEqual(list(cache.keys()), [98, 99])
def test_starmap(self):
    """starmap over a range produces a truthy signature."""
    outcome = self.retry_task.starmap(range(100))
    self.assertTrue(outcome)
def alwayskilled(self):
    """Spawn ten kill tasks and wait for the whole group to finish."""
    tasks = group(kill.s() for _ in range(10))
    self.join(tasks(), timeout=10)
def test_get_many_times_out(self):
    """A result stuck in PENDING makes get_many hit its timeout."""
    task_ids = [uuid() for _ in range(4)]
    self.b._cache[task_ids[1]] = {'status': 'PENDING'}
    with pytest.raises(self.b.TimeoutError):
        list(self.b.get_many(task_ids, timeout=0.01, interval=0.01))
def test_join(self):
    """join() returns the results in submission order."""
    self.assertListEqual(self.ts.join(), list(range(self.size)))
def test_apply_async(self):
    """apply_async returns a result whose parent is the header group."""
    sig = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s())
    result = sig.apply_async()
    self.assertTrue(result)
    self.assertTrue(result.parent)
def alwaysexits(self):
    """Spawn ten exiting tasks and wait for the whole group to finish."""
    tasks = group(exiting.s() for _ in range(10))
    self.join(tasks(), timeout=10)
def test_repair_uuid(self):
    """repair_uuid restores the dashes stripped from a uuid string."""
    from celery.backends.amqp import repair_uuid
    for _ in range(10):
        tid = uuid()
        self.assertEqual(repair_uuid(tid.replace('-', '')), tid)
def test_backlog_limit_exceeded(self):
    """More backlogged messages than the limit raises BacklogLimitExceeded."""
    with self._result_context() as (results, backend, Message):
        # one over the default backlog limit of 1000
        for _ in range(1001):
            results.put(Message(task_id='id', status=states.RECEIVED))
        with self.assertRaises(backend.BacklogLimitExceeded):
            backend.get_task_meta('id')
def test_iterate_simple(self):
    """iterate() yields every result exactly once."""
    collected = sorted(self.ts.iterate())
    self.assertListEqual(collected, list(range(self.size)))
def test_bounded(self):
    """Releasing past the initial bound never raises the value above it."""
    sem = BoundedSemaphore(2)
    for _ in range(100):
        sem.release()
    self.assertEqual(sem.value, 2)
def make_mock_group(app, size=10):
    """Return `size` AsyncResults backed by pre-saved SUCCESS metadata.

    :param app: the Celery app providing AsyncResult.
    :param size: number of mock tasks to create.
    """
    tasks = [mock_task('ts%d' % i, states.SUCCESS, i) for i in range(size)]
    # plain loop instead of a side-effect list comprehension
    for task in tasks:
        save_result(app, task)
    return [app.AsyncResult(task['id']) for task in tasks]
def test_iterate_simple(self):
    """iterate() is pending deprecation but still yields all results."""
    with self.assertPendingDeprecation():
        collected = sorted(self.ts.iterate())
        self.assertListEqual(collected, list(range(self.size)))
def test_apply_eager(self):
    """task_always_eager makes the chord resolve synchronously."""
    self.app.conf.task_always_eager = True
    result = chord(
        [self.add.s(i, i) for i in range(10)], body=self.xsum.s(),
    ).apply_async()
    # sum(i + i for i in range(10)) == 90
    self.assertEqual(result.get(), 90)
def test_map(self):
    """map over a range produces a truthy signature."""
    outcome = self.retry_task.map(range(100))
    assert outcome
def test_chunks(self):
    """chunks over a range produces a truthy chunked signature."""
    outcome = self.retry_task.chunks(range(100), 10)
    assert outcome
def test_chord_raises_error(self):
    """A backend without chord support raises NotImplementedError."""
    header = (self.add.s(i, i) for i in range(10))
    with pytest.raises(NotImplementedError):
        chord(header)(self.add.s([2]))
def test_starmap(self):
    """starmap over a range produces a truthy signature."""
    outcome = self.retry_task.starmap(range(100))
    assert outcome
def test_apply_eager_with_arguments(self):
    """An eager chord forwards apply_async args into the header tasks."""
    self.app.conf.task_always_eager = True
    sig = chord([self.add.s(i) for i in range(10)], body=self.xsum.s())
    result = sig.apply_async([1])
    # each header task computes add(1, i): sum(1 + i for i in range(10)) == 55
    assert result.get() == 55
def test_chunks(self):
    """chunks over a range produces a truthy chunked signature."""
    outcome = self.retry_task.chunks(range(100), 10)
    self.assertTrue(outcome)
def test_run_header_not_group(self):
    """A plain list header (not a group) is accepted by the chord task."""
    header = [self.add.s(i, i) for i in range(10)]
    self.task(header, self.xsum.s())
def test_iterate_simple(self):
    """iterate() warns CPendingDeprecationWarning but yields all results."""
    with pytest.warns(CPendingDeprecationWarning):
        collected = sorted(self.ts.iterate())
    assert collected == list(range(self.size))
def runtest(self, fun, n=50, index=0, repeats=1):
    """Run one stress test `fun` for `n` iterations, tracking progress.

    :param fun: the test callable; may override `n` via `__iterations__`.
    :param n: default iteration count.
    :param index: position of the test within the suite (for display).
    :param repeats: which repetition of the suite this run belongs to.
    """
    # a test can pin its own iteration count via __iterations__
    n = getattr(fun, '__iterations__', None) or n
    header = '[[[{0}({1})]]]'.format(fun.__name__, n)
    if repeats > 1:
        header = '{0} #{1}'.format(header, repeats)
    self.print(header)
    with blockdetection(self.block_timeout):
        with self.fbi.investigation():
            runtime = elapsed = monotonic()
            i = 0
            failed = False
            self.progress = Progress(
                fun, i, n, index, repeats, elapsed, runtime, 0,
            )
            _marker.delay(pstatus(self.progress))
            try:
                for i in range(n):
                    runtime = monotonic()
                    self.progress = Progress(
                        fun, i + 1, n, index, repeats, runtime, elapsed, 0,
                    )
                    self.execute_test(fun)
            except Exception:
                failed = True
                self.speaker.beep()
                raise
            finally:
                # summary line goes to stderr on failure, stdout otherwise
                if n > 1 or failed:
                    self.print('{0} {1} iterations in {2}'.format(
                        'failed after' if failed else 'completed',
                        i + 1, humanize_seconds(monotonic() - elapsed),
                    ), file=self.stderr if failed else self.stdout)
                if not failed:
                    # final Progress marks completion (last field = 1)
                    self.progress = Progress(
                        fun, i + 1, n, index, repeats, runtime, elapsed, 1,
                    )
def test_join(self):
    """join() returns the results in submission order."""
    assert self.ts.join() == list(range(self.size))