def test_loop(self):
    acc = []
    started = Signal()

    def work(please_stop):
        started.go()
        while not please_stop:
            acc.append(Date.now().unix)
            Till(seconds=0.1).wait()

    worker = Thread.run("loop", work)
    started.wait()
    while len(acc) < 10:
        Till(seconds=0.1).wait()
    worker.stop()
    worker.join()

    # We expect 10, but 9 is good enough
    num = len(acc)
    self.assertGreater(
        num,
        9,
        "Expecting some reasonable number of entries to prove there was looping, not " + text(num),
    )
def test_memory_cleanup_with_till(self):
    objgraph.growth()

    root = Signal()
    for i in range(100000):
        if i % 1000 == 0:
            Log.note("at {{num}} tills", num=i)
        root = root | Till(seconds=100000)
        mid_mem = psutil.Process(os.getpid()).memory_info().rss
        if mid_mem > 1000 * 1000 * 1000:
            Log.note("{{num}} Till triggers created", num=i)
            break
    trigger = Signal()
    root = root | trigger

    growth = objgraph.growth(limit=4)
    growth and Log.note("More objects\n{{growth}}", growth=growth)

    trigger.go()
    root.wait()  # THERE SHOULD BE NO DELAY HERE

    for _ in range(0, 20):
        try:
            Till(seconds=0.1).wait()  # LET TIMER DAEMON CLEANUP
            current = [(t, objgraph.count(t), objgraph.count(t) - c) for t, c, d in growth]
            Log.note("Object count\n{{current}}", current=current)

            # NUMBER OF OBJECTS CLEANED UP SHOULD MATCH NUMBER OF OBJECTS CREATED
            for (_, _, cd), (_, _, gd) in zip(current, growth):
                self.assertAlmostEqual(-cd, gd, places=2)
            return
        except Exception:
            pass
    Log.error("object counts did not go down")
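# A minimal sketch (not part of the test above) of the signal composition it
# exercises, assuming only mo_threads' Signal and Till: `a | b` returns a new
# Signal that fires as soon as EITHER input fires, which is why the single
# trigger.go() above releases the whole chain of ORed Tills.
def _example_signal_or():
    from mo_threads import Signal, Till

    a = Signal("a")
    either = a | Till(seconds=100)  # fires on a.go() OR after 100 seconds
    a.go()
    either.wait()  # returns immediately, because `a` already fired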
def test_lock_and_till(self):
    locker = Lock("prime lock")
    got_lock = Signal()
    a_is_ready = Signal("a lock")
    b_is_ready = Signal("b lock")

    def loop(is_ready, please_stop):
        with locker:
            while not got_lock:
                # Log.note("{{thread}} is waiting", thread=Thread.current().name)
                locker.wait(till=Till(seconds=0))
                is_ready.go()
            locker.wait()
            Log.note("thread is expected to get here")

    thread_a = Thread.run("a", loop, a_is_ready)
    thread_b = Thread.run("b", loop, b_is_ready)

    a_is_ready.wait()
    b_is_ready.wait()
    with locker:
        got_lock.go()
        Till(seconds=0.1).wait()  # MUST WAIT FOR a AND b TO PERFORM locker.wait()
        Log.note("leaving")
    with locker:
        Log.note("leaving again")
    Till(seconds=1).wait()

    self.assertTrue(bool(thread_a.stopped), "Thread should be done by now")
    self.assertTrue(bool(thread_b.stopped), "Thread should be done by now")
def test_queue_speed(self):
    SCALE = 1000 * 10

    done = Signal("done")
    slow = Queue()
    q = ThreadedQueue("test queue", queue=slow)

    def empty(please_stop):
        while not please_stop:
            item = slow.pop()  # CONSUME FROM THE UNDERLYING QUEUE, NOT THE THREADED WRAPPER
            if item is THREAD_STOP:
                break
        done.go()

    Thread.run("empty", empty)

    timer = Timer("add {{num}} to queue", param={"num": SCALE})
    with timer:
        for i in range(SCALE):
            q.add(i)
        q.add(THREAD_STOP)
        Log.note("Done insert")
        done.wait()

    self.assertLess(timer.duration.seconds, 1.5, "Expecting queue to be fast")
def request(self, method, path, headers):
    now = Date.now()
    self.inbound_rate.add(now)
    ready = Signal(path)

    # TEST CACHE
    with self.cache_locker:
        pair = self.cache.get(path)
        if pair is None:
            self.cache[path] = (ready, None, None, now)

    if pair is not None:
        # REQUEST IS IN THE QUEUE ALREADY, WAIT
        ready, headers, response, then = pair
        if response is None:
            ready.wait()
            with self.cache_locker:
                ready, headers, response, timestamp = self.cache.get(path)
        with self.db.transaction() as t:
            t.execute(
                "UPDATE cache SET timestamp=" + quote_value(now) +
                " WHERE path=" + quote_value(path) +
                " AND timestamp<" + quote_value(now)
            )
        return Response(response, status=200, headers=json.loads(headers))

    # TEST DB
    db_response = self.db.query("SELECT headers, response FROM cache WHERE path=" + quote_value(path)).data
    if db_response:
        headers, response = db_response[0]
        with self.db.transaction() as t:
            t.execute(
                "UPDATE cache SET timestamp=" + quote_value(now) +
                " WHERE path=" + quote_value(path) +
                " AND timestamp<" + quote_value(now)
            )
        with self.cache_locker:
            self.cache[path] = (ready, headers, response.encode('latin1'), now)
        ready.go()
        return Response(response, status=200, headers=json.loads(headers))

    # MAKE A NETWORK REQUEST
    self.todo.add((ready, method, path, headers, now))
    ready.wait()
    with self.cache_locker:
        ready, headers, response, timestamp = self.cache[path]
    return Response(response, status=200, headers=json.loads(headers))
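# A hedged sketch of the network worker that request() above depends on: it
# drains self.todo, fills self.cache, and fires the per-path Signal. The use
# of the `requests` library and the `self.host` attribute are illustrative
# assumptions, not the original implementation.
def _worker(self, please_stop):
    while not please_stop:
        pair = self.todo.pop(till=please_stop)
        if pair is None:
            continue  # pop() timed out; check please_stop again
        ready, method, path, req_headers, timestamp = pair
        response = requests.request(method, self.host + path, headers=req_headers)  # hypothetical upstream call
        with self.cache_locker:
            # STORE headers AS JSON TEXT, SINCE request() CALLS json.loads() ON THEM
            self.cache[path] = (ready, value2json(dict(response.headers)), response.content, timestamp)
        ready.go()  # WAKE EVERY THREAD BLOCKED ON THIS path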
def query(self, command):
    """
    WILL BLOCK CALLING THREAD UNTIL THE command IS COMPLETED
    :param command: COMMAND FOR SQLITE
    :return: list OF RESULTS
    """
    if not self.worker:
        self.worker = Thread.run("sqlite db thread", self._worker)
    signal = Signal()
    result = Data()
    self.queue.add((command, result, signal, None))
    signal.wait()
    if result.exception:
        Log.error("Problem with Sqlite call", cause=result.exception)
    return result
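# A hedged usage sketch for query() above. The filename and statement are
# hypothetical, and `Sqlite` stands in for whatever class owns this method:
def _example_sqlite_query():
    db = Sqlite(filename="example.db")    # assumed constructor
    result = db.query("SELECT 1 AS one")  # blocks until the worker thread signals
    return result.data                    # rows the worker attached to the Data() container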
def _test_queue_speed(self, test=False):
    SCALE = 1000 * 10

    done = Signal("done")
    slow = Queue()
    q = ThreadedQueue("test queue", slow_queue=slow)

    def empty(please_stop):
        while not please_stop:
            item = slow.pop()
            if item is THREAD_STOP:
                break
        done.go()

    Thread.run("empty", empty)

    timer = Timer("add {{num}} to queue", param={"num": SCALE})
    with timer:
        for i in range(SCALE):
            q.add(i)
        q.add(THREAD_STOP)
        Log.note("Done insert")
        done.wait()

    Log.note(
        "{{num}} items through queue in {{seconds|round(3)}} seconds",
        num=SCALE,
        seconds=timer.duration.seconds,
    )
    if PY2 and "windows" not in platform.system().lower():
        expected_time = 15  # LINUX PY2 IS CRAZY SLOW
    elif PY3 and "windows" not in platform.system().lower():
        expected_time = 6  # LINUX PY3 IS SLOW
    else:
        expected_time = 6
    if test:
        self.assertLess(
            timer.duration.seconds,
            expected_time,
            "Expecting queue to be fast, not " + text(timer.duration.seconds) + " seconds",
        )
def test_lock_and_till(self):
    locker = Lock("prime lock")
    got_signal = Signal()
    a_is_ready = Signal("a lock")
    b_is_ready = Signal("b lock")

    Log.note("begin")

    def loop(is_ready, please_stop):
        with locker:
            while not got_signal:
                locker.wait(till=Till(seconds=0.01))
                is_ready.go()
                Log.note("{{thread}} is ready", thread=Thread.current().name)
            Log.note("outside loop")
            locker.wait()
            Log.note("thread is expected to get here")

    thread_a = Thread.run("a", loop, a_is_ready).release()
    thread_b = Thread.run("b", loop, b_is_ready).release()

    a_is_ready.wait()
    b_is_ready.wait()

    timeout = Till(seconds=1)
    with locker:
        got_signal.go()
        while not thread_a.stopped:
            # WE MUST CONTINUE TO USE THE locker TO ENSURE THE OTHER THREADS ARE NOT ORPHANED IN THERE
            locker.wait(till=Till(seconds=0.1))
            Log.note("wait for a thread")
        while not thread_b.stopped:
            # WE MUST CONTINUE TO USE THE locker TO ENSURE THE OTHER THREADS ARE NOT ORPHANED IN THERE
            locker.wait(till=Till(seconds=0.1))
            Log.note("wait for b thread")
    thread_a.join()
    thread_b.join()
    if timeout:
        Log.error("Took too long")

    self.assertTrue(bool(thread_a.stopped), "Thread should be done by now")
    self.assertTrue(bool(thread_b.stopped), "Thread should be done by now")
class Python(object):

    def __init__(self, name, config):
        config = wrap(config)
        if config.debug.logs:
            Log.error("not allowed to configure logging on other process")

        self.process = Process(name, [PYTHON, "mo_threads" + os.sep + "python_worker.py"], shell=True)
        self.process.stdin.add(value2json(set_default({"debug": {"trace": True}}, config)))

        self.lock = Lock("wait for response from " + name)
        self.current_task = None
        self.current_response = None
        self.current_error = None

        self.daemon = Thread.run("", self._daemon)
        self.errors = Thread.run("", self._stderr)

    def _execute(self, command):
        with self.lock:
            if self.current_task is not None:
                self.current_task.wait()
            self.current_task = Signal()
            self.current_response = None
            self.current_error = None
        self.process.stdin.add(value2json(command))
        self.current_task.wait()
        with self.lock:
            try:
                if self.current_error:
                    Log.error("problem with process call", cause=Except.new_instance(self.current_error))
                else:
                    return self.current_response
            finally:
                self.current_task = None
                self.current_response = None
                self.current_error = None

    def _daemon(self, please_stop):
        while not please_stop:
            line = self.process.stdout.pop(till=please_stop)
            if line == THREAD_STOP:
                break
            try:
                data = json2value(line.decode('utf8'))
                if "log" in data:
                    Log.main_log.write(*data.log)
                elif "out" in data:
                    with self.lock:
                        self.current_response = data.out
                        self.current_task.go()
                elif "err" in data:
                    with self.lock:
                        self.current_error = data.err
                        self.current_task.go()
            except Exception:
                Log.note("non-json line: {{line}}", line=line)
        DEBUG and Log.note("stdout reader is done")

    def _stderr(self, please_stop):
        while not please_stop:
            try:
                line = self.process.stderr.pop(till=please_stop)
                if line == THREAD_STOP:
                    please_stop.go()
                    break
                Log.note("Error line from {{name}}({{pid}}): {{line}}", line=line, name=self.process.name, pid=self.process.pid)
            except Exception as e:
                Log.error("could not process line", cause=e)

    def import_module(self, module_name, var_names=None):
        if var_names is None:
            self._execute({"import": module_name})
        else:
            self._execute({"import": {"from": module_name, "vars": var_names}})

    def set(self, var_name, value):
        self._execute({"set": {var_name: value}})

    def get(self, var_name):
        return self._execute({"get": var_name})

    def execute_script(self, script):
        return self._execute({"exec": script})

    def __getattr__(self, item):
        def output(*args, **kwargs):
            if len(args):
                if len(kwargs.keys()):
                    Log.error("Not allowed to use both args and kwargs")
                return self._execute({item: args})
            else:
                return self._execute({item: kwargs})
        return output

    def stop(self):
        self._execute({"stop": {}})
        self.process.join()
        self.daemon.stop()
        self.errors.stop()
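# A hedged usage sketch for the Python wrapper above; the module and values are
# hypothetical, and only the methods defined on the class are assumed:
def _example_python_worker():
    p = Python("worker", config={})
    try:
        p.import_module("math", ["sqrt"])  # sends {"import": {"from": "math", "vars": ["sqrt"]}}
        p.set("x", 2)                      # sends {"set": {"x": 2}}
        value = p.get("x")                 # blocks until the daemon reads an "out" line
        root = p.sqrt(4)                   # __getattr__ turns this into {"sqrt": (4,)}
        return value, root
    finally:
        p.stop()                           # asks the worker to exit, then joins all threads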
class AsyncResult(object):
    """
    Query task state.
    """

    all_results = {}
    all_results_lock = Lock()
    mail = None

    def __new__(cls, id, mail=None, app=None):
        id = int(id)
        with AsyncResult.all_results_lock:
            output = AsyncResult.all_results.get(id)
            if not output:
                output = AsyncResult.all_results[id] = object.__new__(cls)
            if mail:
                output.mail = mail
        return output

    def __init__(self, id, mail=None, app=None):
        self.id = int(id)
        self.mail = coalesce(self.mail, mail)
        self.app = app
        self._cache = None
        self._ready = Signal()

    def as_tuple(self):
        parent = self.parent
        return (self.id, parent and parent.as_tuple()), None
    serializable = as_tuple  # XXX compat

    def forget(self):
        """Forget about (and possibly remove the result of) this task."""
        self._cache = None
        self.backend.forget(self.id)

    def revoke(self, connection=None, terminate=False, signal=None, wait=False, timeout=None):
        self.app.response_queue.add(value2json({
            "request": {"id": self.id},
            "status": states.REVOKED
        }))

    def get(self, timeout=None, propagate=True, interval=0.5, no_ack=True, follow_parents=True):
        """Wait until task is ready, and return its result.

        .. warning::

           Waiting for tasks within a task may lead to deadlocks.
           Please read :ref:`task-synchronous-subtasks`.

        :keyword timeout: How long to wait, in seconds, before the
            operation times out.
        :keyword propagate: Re-raise exception if the task failed.
        :keyword interval: Time to wait (in seconds) before retrying to
            retrieve the result.  Note that this does not have any effect
            when using the amqp result store backend, as it does not
            use polling.
        :keyword no_ack: Enable amqp no ack (automatically acknowledge
            message).  If this is :const:`False` then the message will
            **not be acked**.
        :keyword follow_parents: Reraise any exception raised by parent task.

        :raises celery.exceptions.TimeoutError: if `timeout` is not
            :const:`None` and the result does not arrive within `timeout`
            seconds.

        If the remote call raised an exception then that exception will
        be re-raised.
        """
        self._ready.wait()

        on_interval = None
        if follow_parents and propagate and self.parent:
            on_interval = self._maybe_reraise_parent_error
            on_interval()

        if self._cache:
            if propagate:
                self.maybe_reraise()
            return self.result

        meta = self.backend.wait_for(
            self.id,
            timeout=timeout,
            interval=interval,
            on_interval=on_interval,
            no_ack=no_ack,
        )
        if meta:
            self._maybe_set_cache(meta)
            status = meta['status']
            if status in PROPAGATE_STATES and propagate:
                raise meta['result']
            return meta['result']
    wait = get  # deprecated alias to :meth:`get`.

    def _maybe_reraise_parent_error(self):
        for node in reversed(list(self._parents())):
            node.maybe_reraise()

    def _parents(self):
        node = self.parent
        while node:
            yield node
            node = node.parent

    def get_leaf(self):
        value = None
        for _, R in self.iterdeps():
            value = R.get()
        return value

    def iterdeps(self, intermediate=False):
        stack = deque([(None, self)])
        while stack:
            parent, node = stack.popleft()
            yield parent, node
            if node.ready():
                stack.extend((node, child) for child in node.children or [])
            else:
                if not intermediate:
                    raise IncompleteStream()

    def ready(self):
        """Returns :const:`True` if the task has been executed.

        If the task is still running, pending, or is waiting
        for retry then :const:`False` is returned.
""" return bool(self._ready) def successful(self): """Returns :const:`True` if the task executed successfully.""" return self.state == states.SUCCESS def failed(self): """Returns :const:`True` if the task failed.""" return self.state == states.FAILURE def maybe_reraise(self): if self.state in states.PROPAGATE_STATES: raise self.result def __str__(self): """`str(self) -> self.id`""" return str(self.id) def __hash__(self): """`hash(self) -> hash(self.id)`""" return hash(self.id) def __repr__(self): return '<{0}: {1}>'.format(type(self).__name__, self.id) def __eq__(self, other): if isinstance(other, AsyncResult): return other.id == self.id elif isinstance(other, basestring): return other == self.id return NotImplemented def __ne__(self, other): return not self.__eq__(other) def __copy__(self): return self.__class__( self.id, self.backend, self.task_name, self.app, self.parent, ) def __reduce__(self): return self.__class__, self.__reduce_args__() def __reduce_args__(self): return self.id, self.backend, self.task_name, None, self.parent def __del__(self): self._cache = None @property def supports_native_join(self): return self.backend.supports_native_join @property def children(self): return self._get_task_meta().get('children') def _maybe_set_cache(self, meta): if meta: state = meta['status'] if state == states.SUCCESS or state in states.PROPAGATE_STATES: return self._set_cache(meta) return meta def _get_task_meta(self): """ RETURN REDASH-EXPECTED METADATA """ if not self.mail: return {"status": states.PENDING, "result": {}} output = Data(copy(self.mail)) if output.result == None: output.result = {} if output.status == states.PENDING: output.result.start_time = output.response.start_time return unwrap(output) @property def result(self): """When the task has been executed, this contains the return value. If the task raised an exception, this will be the exception instance.""" return self.mail.result info = result @property def traceback(self): """Get the traceback of a failed task.""" return self._get_task_meta().get('traceback') @property def state(self): return self.mail.status status = state @property def task_id(self): """compat alias to :attr:`id`""" return self.id @task_id.setter # noqa def task_id(self, id): self.id = id