def test_lock_and_till(self):
    locker = Lock("prime lock")
    got_signal = Signal()
    a_is_ready = Signal("a lock")
    b_is_ready = Signal("b lock")
    Log.note("begin")

    def loop(is_ready, please_stop):
        with locker:
            while not got_signal:
                locker.wait(till=Till(seconds=0.01))
                is_ready.go()
                Log.note("{{thread}} is ready", thread=Thread.current().name)
            Log.note("outside loop")
            locker.wait()
            Log.note("thread is expected to get here")

    thread_a = Thread.run("a", loop, a_is_ready).release()
    thread_b = Thread.run("b", loop, b_is_ready).release()

    a_is_ready.wait()
    b_is_ready.wait()

    timeout = Till(seconds=1)
    with locker:
        got_signal.go()
        while not thread_a.stopped:
            # WE MUST CONTINUE TO USE THE locker TO ENSURE THE OTHER THREADS ARE NOT ORPHANED IN THERE
            locker.wait(till=Till(seconds=0.1))
            Log.note("wait for a thread")
        while not thread_b.stopped:
            # WE MUST CONTINUE TO USE THE locker TO ENSURE THE OTHER THREADS ARE NOT ORPHANED IN THERE
            locker.wait(till=Till(seconds=0.1))
            Log.note("wait for b thread")

    thread_a.join()
    thread_b.join()

    if timeout:
        Log.error("Took too long")

    self.assertTrue(bool(thread_a.stopped), "Thread should be done by now")
    self.assertTrue(bool(thread_b.stopped), "Thread should be done by now")
def killer(self, please_stop):
    self.current.stop()
    (
        please_stop |
        self.current.service_stopped() |
        Till(seconds=self.wait_for_shutdown.seconds)
    ).wait()
    if not self.current.service_stopped:
        self.fail_count += 1
        self.current.kill()
        self.current.join()
def delete_daemon(file, please_stop):
    # WINDOWS WILL HANG ONTO A FILE FOR A BIT AFTER WE CLOSED IT
    while not please_stop:
        try:
            file.delete()
            return
        except Exception as e:
            Log.warning(u"problem deleting file {{file}}", file=file.abspath, cause=e)
            Till(seconds=1).wait()
def create_index(
    self,
    index,
    alias=None,
    create_timestamp=None,
    schema=None,
    limit_replicas=None,
    read_only=False,
    tjson=False,
    kwargs=None
):
    if not alias:
        alias = kwargs.alias = kwargs.index
        index = kwargs.index = proto_name(alias, create_timestamp)

    if kwargs.alias == index:
        Log.error("Expecting index name to conform to pattern")

    if kwargs.schema_file:
        Log.error('schema_file attribute not supported. Use {"$ref":<filename>} instead')

    if schema == None:
        Log.error("Expecting a schema")
    elif isinstance(schema, basestring):
        schema = mo_json.json2value(schema, leaves=True)
    else:
        schema = mo_json.json2value(convert.value2json(schema), leaves=True)

    if limit_replicas:
        # DO NOT ASK FOR TOO MANY REPLICAS
        health = self.get("/_cluster/health")
        if schema.settings.index.number_of_replicas >= health.number_of_nodes:
            Log.warning(
                "Reduced number of replicas: {{from}} requested, {{to}} realized",
                {"from": schema.settings.index.number_of_replicas},
                to=health.number_of_nodes - 1
            )
            schema.settings.index.number_of_replicas = health.number_of_nodes - 1

    self.post(
        "/" + index,
        data=schema,
        headers={"Content-Type": "application/json"}
    )

    # CONFIRM INDEX EXISTS
    while True:
        try:
            state = self.get("/_cluster/state", retry={"times": 5}, timeout=3)
            if index in state.metadata.indices:
                break
            Log.note("Waiting for index {{index}} to appear", index=index)
        except Exception as e:
            Log.warning("Problem while waiting for index {{index}} to appear", index=index, cause=e)
        Till(seconds=1).wait()
def _test_mode_wait(query):
    """
    WAIT FOR METADATA TO ARRIVE ON INDEX
    :param query: dict() OF REQUEST BODY
    :return: nothing
    """
    try:
        m = meta.singlton
        now = Date.now()
        end_time = now + MINUTE

        # MARK COLUMNS DIRTY
        m.meta.columns.update({
            "clear": ["partitions", "count", "cardinality", "last_updated"],
            "where": {"eq": {"es_index": join_field(split_field(query["from"])[0:1])}}
        })

        # BE SURE THEY ARE ON THE todo QUEUE FOR RE-EVALUATION
        cols = [
            c
            for c in m.get_columns(table_name=query["from"], force=True)
            if c.type not in STRUCT
        ]
        for c in cols:
            Log.note("Mark {{column}} dirty at {{time}}", column=c.names["."], time=now)
            c.last_updated = now - TOO_OLD
            m.todo.push(c)

        while end_time > Date.now():
            # GET FRESH VERSIONS
            cols = [
                c
                for c in m.get_columns(table_name=query["from"])
                if c.type not in STRUCT
            ]
            for c in cols:
                if not c.last_updated or c.cardinality == None:
                    Log.note(
                        "wait for column (table={{col.es_index}}, name={{col.es_column}}) metadata to arrive",
                        col=c
                    )
                    break
            else:
                break
            Till(seconds=1).wait()

        for c in cols:
            Log.note(
                "fresh column name={{column.name}} updated={{column.last_updated|date}} parts={{column.partitions}}",
                column=c
            )
    except Exception as e:
        Log.warning("could not pickup columns", cause=e)
def query_and_wait(self, sql):
    job = self.client.query(text(sql))
    while job.state == "RUNNING":
        DEBUG and Log.note("job {{id}} state = {{state}}", id=job.job_id, state=job.state)
        Till(seconds=1).wait()
        job = self.client.get_job(job.job_id)
    DEBUG and Log.note("job {{id}} state = {{state}}", id=job.job_id, state=job.state)
    return job
def worker(logger, please_stop):
    try:
        while not please_stop:
            Till(seconds=1).wait()
            logs = self.queue.pop_all()
            for log in logs:
                if log is THREAD_STOP:
                    please_stop.go()
                else:
                    logger.write(**log)
    finally:
        logger.stop()
def try_till_response(self, *args, **kwargs):
    while True:
        try:
            response = self.server.get(*args, **kwargs)
            return response
        except Exception as e:
            e = Except.wrap(e)
            if "No connection could be made because the target machine actively refused it" in e or "Connection refused" in e:
                Log.alert("Problem connecting")
                Till(seconds=WAIT_AFTER_PROBLEM).wait()
            else:
                Log.error("Server raised exception", e)
def _insert_loop(self, please_stop=None):
    bad_count = 0
    while not please_stop:
        try:
            messages = wrap(self.queue.pop_all())
            if not messages:
                Till(seconds=1).wait()
                continue

            for g, mm in jx.groupby(messages, size=self.batch_size):
                scrubbed = []
                for i, message in enumerate(mm):
                    if message is THREAD_STOP:
                        please_stop.go()
                        return
                    try:
                        scrubbed.append(_deep_json_to_string(message, depth=3))
                    except Exception as e:
                        Log.warning("Problem adding to scrubbed list", cause=e)
                self.es.extend(scrubbed)
            bad_count = 0
        except Exception as f:
            Log.warning("Problem inserting logs into ES", cause=f)
            bad_count += 1
            if bad_count > MAX_BAD_COUNT:
                Log.warning(
                    "Given up trying to write debug logs to ES index {{index}}",
                    index=self.es.settings.index
                )
            Till(seconds=30).wait()

    # CONTINUE TO DRAIN THIS QUEUE
    while not please_stop:
        try:
            Till(seconds=1).wait()
            self.queue.pop_all()
        except Exception as e:
            Log.warning("Should not happen", cause=e)
def _cache_cleaner(self, please_stop):
    while not please_stop:
        now = Date.now()
        too_old = now - CACHE_RETENTION

        remove = set()
        with self.cache_locker:
            for path, (ready, headers, response, timestamp) in self.cache:
                if timestamp < too_old:
                    remove.add(path)
            for r in remove:
                del self.cache[r]
        (please_stop | Till(seconds=CACHE_RETENTION.seconds / 2)).wait()
def run_threads_daemon(self, please_stop=None):
    while not please_stop:
        try:
            with self.threads_locker:
                Log.note(
                    "Currently {{waiting}} waiting to get annotation, and {{threads}} waiting to be created.",
                    waiting=self.waiting,
                    threads=self.threads_waiting
                )
            (Till(seconds=DAEMON_WAIT_FOR_THREADS.seconds) | please_stop).wait()
        except Exception as e:
            Log.warning("Unexpected error in pc-daemon: {{cause}}", cause=e)
def test_mode_wait(query):
    """
    WAIT FOR METADATA TO ARRIVE ON INDEX
    :param query: dict() OF REQUEST BODY
    :return: nothing
    """
    if not query["from"]:
        return

    try:
        if query["from"].startswith("meta."):
            return

        now = Date.now()
        alias = split_field(query["from"])[0]
        metadata_manager = find_container(alias).namespace
        metadata_manager.meta.tables[alias].timestamp = now  # TRIGGER A METADATA RELOAD AFTER THIS TIME

        timeout = Till(seconds=MINUTE.seconds)
        while not timeout:
            # GET FRESH VERSIONS
            cols = [
                c
                for c in metadata_manager.get_columns(table_name=alias, after=now, timeout=timeout)
                if c.jx_type not in STRUCT
            ]
            for c in cols:
                if now >= c.last_updated:
                    Log.note(
                        "wait for column (table={{col.es_index}}, name={{col.es_column}}) metadata to arrive",
                        col=c
                    )
                    break
            else:
                break
            Till(seconds=1).wait()
    except Exception as e:
        Log.warning("could not pickup columns", cause=e)
def trigger_job(self):
    while not self.please_stop:
        now = Date.now()
        next = now + DAY
        for j in self.jobs:
            if j.next_run_time < now:
                j.next_run_time = next_run(j)
                self.run_job(j)
            next = Date.min(next, j.next_run_time)
        (Till(till=next) | self.please_stop).wait()
def test_note_keyword_parameters(self):
    a = {"c": "a", "b": "d"}
    b = {"c": "b"}
    params = {"a": a, "b": b}

    WARNING = 'test'
    A = '{\n "b": "d",\n "c": "a"\n}'
    B = '{"c": "b"}'
    AC = 'a'
    AB = 'd'
    BC = 'b'

    # DURING TESTING SOME OTHER THREADS MAY STILL BE WRITING TO THE LOG
    Till(seconds=1).wait()

    # HIGHJACK LOG FOR TESTING OUTPUT
    log_queue = StructuredLogger_usingQueue()
    backup_log, Log.main_log = Log.main_log, log_queue

    try:
        raise Exception("problem")
    except Exception as e:
        Log.note("test")
        self.assertEqual(Log.main_log.pop(), WARNING)

        Log.note("test: {{a}}", a=a)
        self.assertEqual(Log.main_log.pop(), WARNING + ': ' + A)

        Log.note("test: {{a}}: {{b}}", a=a, b=b)
        self.assertEqual(Log.main_log.pop(), WARNING + ': ' + A + ': ' + B)

        Log.note("test: {{a.c}}", a=a)
        self.assertEqual(Log.main_log.pop(), WARNING + ': ' + AC)

        Log.note("test: {{a}}: {{b}}", params)
        self.assertEqual(Log.main_log.pop(), WARNING + ': ' + A + ': ' + B)

        Log.note("test: {{a}}: {{b}}", params, a=b)
        self.assertEqual(Log.main_log.pop(), WARNING + ': ' + B + ': ' + B)

        Log.note("test: {{a}}: {{b}}", wrap(params), a=b)
        self.assertEqual(Log.main_log.pop(), WARNING + ': ' + B + ': ' + B)

        Log.note("test: {{a.c}}: {{a.b}}", a=a, b=b)
        self.assertEqual(Log.main_log.pop(), WARNING + ': ' + AC + ': ' + AB)

        Log.note("test: {{a.c}}: {{b.c}}", a=a, b=b)
        self.assertEqual(Log.main_log.pop(), WARNING + ': ' + AC + ': ' + BC)
    finally:
        Log.main_log = backup_log
def get_columns(self, table_name, column_name=None, force=False):
    """
    RETURN METADATA COLUMNS
    """
    table_path = split_field(table_name)
    es_index_name = table_path[0]
    query_path = join_field(table_path[1:])
    table = self.get_table(es_index_name)[0]
    abs_column_name = None if column_name == None else concat_field(query_path, column_name)

    try:
        # LAST TIME WE GOT INFO FOR THIS TABLE
        if not table:
            table = Table(
                name=es_index_name,
                url=None,
                query_path=['.'],
                timestamp=Date.now()
            )
            with self.meta.tables.locker:
                self.meta.tables.add(table)
            self._get_columns(table=es_index_name)
        elif force or table.timestamp == None or table.timestamp < Date.now() - MAX_COLUMN_METADATA_AGE:
            table.timestamp = Date.now()
            self._get_columns(table=es_index_name)

        with self.meta.columns.locker:
            columns = self.meta.columns.find(es_index_name, column_name)
        if columns:
            columns = jx.sort(columns, "names.\.")
            # AT LEAST WAIT FOR THE COLUMNS TO UPDATE
            while len(self.todo) and not all(columns.get("last_updated")):
                if DEBUG:
                    Log.note(
                        "waiting for columns to update {{columns|json}}",
                        columns=[c.es_index + "." + c.es_column for c in columns if not c.last_updated]
                    )
                Till(seconds=1).wait()
            return columns
    except Exception as e:
        Log.error("Not expected", cause=e)

    if abs_column_name:
        Log.error("no columns matching {{table}}.{{column}}", table=table_name, column=abs_column_name)
    else:
        self._get_columns(table=table_name)  # TO TEST WHAT HAPPENED
        Log.error("no columns for {{table}}?!", table=table_name)
def test_save_then_load(self):
    test = {
        "data": [{"a": "b"}],
        "query": {
            "meta": {"save": True},
            "from": TEST_TABLE,
            "select": "a"
        },
        "expecting_list": {
            "meta": {"format": "list"},
            "data": ["b"]
        }
    }

    settings = self.utils.fill_container(test)

    bytes = unicode2utf8(value2json({
        "from": settings.index,
        "select": "a",
        "format": "list"
    }))
    expected_hash = convert.bytes2base64(hashlib.sha1(bytes).digest()[0:6]).replace("/", "_")
    wrap(test).expecting_list.meta.saved_as = expected_hash

    self.utils.send_queries(test)

    # ENSURE THE QUERY HAS BEEN INDEXED
    Log.note("Flush saved query (with hash {{hash}})", hash=expected_hash)
    container = elasticsearch.Index(index="saved_queries", type=save_query.DATA_TYPE, kwargs=settings)
    container.flush(forced=True)
    with Timer("wait for 5 seconds"):
        Till(seconds=5).wait()

    url = URL(self.utils.testing.query)
    response = self.utils.try_till_response(
        url.scheme + "://" + url.host + ":" + text_type(url.port) + "/find/" + expected_hash,
        data=b''
    )
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.all_content, bytes)
def test_get_push1(self):
    central = [
        b
        for b in self.hg.branches
        if b.name == "mozilla-central" and b.locale == "en-US"
    ][0]
    test = self.hg._get_push(central, "b6b8e616de32")
    expected = {
        "date": 1503659542,
        "user": "******",
        "id": 32390
    }
    self.assertEqual(test, expected)

    while len(self.hg.todo.queue):
        Till(seconds=1).wait()
def delete_daemon(file, caller_stack, please_stop):
    # WINDOWS WILL HANG ONTO A FILE FOR A BIT AFTER WE CLOSED IT
    while not please_stop:
        try:
            file.delete()
            return
        except Exception as e:
            e = Except.wrap(e)
            e.trace = e.trace[0:2] + caller_stack
            Log.warning(u"problem deleting file {{file}}", file=file.abspath, cause=e)
            (Till(seconds=10) | please_stop).wait()
def setup(
    self,
    instance,  # THE boto INSTANCE OBJECT FOR THE MACHINE TO SETUP
    utility    # THE utility OBJECT FOUND IN CONFIG
):
    with self.locker:
        if not self.settings.setup_timeout:
            Log.error("expecting instance.setup_timeout to prevent setup from locking")

        def worker(please_stop):
            cpu_count = int(round(utility.cpu))

            with hide('output'):
                Log.note("setup {{instance}}", instance=instance.id)
                self._config_fabric(instance)
                Log.note("update packages on {{instance}} ip={{ip}}", instance=instance.id, ip=instance.ip_address)
                try:
                    self._update_ubuntu_packages()
                except Exception as e:
                    Log.warning(
                        "Can not setup {{instance}}, type={{type}}",
                        instance=instance.id,
                        type=instance.instance_type,
                        cause=e
                    )
                    return
                Log.note("setup etl on {{instance}}", instance=instance.id)
                self._setup_etl_code()
                Log.note("setup grcov on {{instance}}", instance=instance.id)
                self._setup_grcov()
                Log.note("add config file on {{instance}}", instance=instance.id)
                self._add_private_file()
                Log.note("setup supervisor on {{instance}}", instance=instance.id)
                self._setup_etl_supervisor(cpu_count)
                Log.note("setup done {{instance}}", instance=instance.id)

        worker_thread = Thread.run("etl setup started at " + unicode(Date.now().format()), worker)
        (Till(timeout=Duration(self.settings.setup_timeout).seconds) | worker_thread.stopped).wait()
        if not worker_thread.stopped:
            Log.error("critical failure in thread {{name|quote}}", name=worker_thread.name)
        worker_thread.join()
def run_memory_daemon(self, please_stop):
    while not please_stop:
        try:
            (Till(seconds=DAEMON_MEMORY_LOG_INTERVAL.seconds) | please_stop).wait()
            mem = psutil.virtual_memory()
            Log.note("TUID Process - complete memory info: {{mem}}", mem=str(mem))
            Log.note("\nOpen threads ({{num}}):", num=len(ALL))
            Log.note("{{data}}", data=str({i: ALL[i].name for i in ALL}))
        except Exception as e:
            Log.warning("Error encountered while trying to log memory: {{cause}}", cause=e)
def _daemon(self, please_stop):
    while not please_stop:
        timestamp = Date.now()
        with self.lock:
            decay = METRIC_DECAY_RATE ** (timestamp - self.last_request).seconds
            request_rate = self.request_rate = decay * self.request_rate
            self.last_request = timestamp

        Log.note(
            "{{name}} request rate: {{rate|round(places=2)}} requests per second",
            name=self.name,
            rate=request_rate
        )
        (please_stop | Till(seconds=METRIC_REPORT_PERIOD.seconds)).wait()
def monitor(self, please_stop):
    please_stop.on_go(lambda: self.todo.add(THREAD_STOP))
    while not please_stop:
        try:
            if not self.todo:
                with self.meta.columns.locker:
                    old_columns = [
                        c
                        for c in self.meta.columns
                        if (c.last_updated == None or c.last_updated < Date.now() - TOO_OLD) and c.type not in STRUCT
                    ]
                    if old_columns:
                        if DEBUG:
                            Log.note("Old columns with dates {{dates|json}}", dates=wrap(old_columns).last_updated)
                        self.todo.extend(old_columns)
                        # TEST CONSISTENCY
                        for c, d in product(list(self.todo.queue), list(self.todo.queue)):
                            if c.es_column == d.es_column and c.es_index == d.es_index and c != d:
                                Log.error("")
                    else:
                        if DEBUG:
                            Log.note("no more metadata to update")

            column = self.todo.pop(Till(seconds=(10 * MINUTE).seconds))
            if column:
                if DEBUG:
                    Log.note("update {{table}}.{{column}}", table=column.es_index, column=column.es_column)
                if column.type in STRUCT:
                    with self.meta.columns.locker:
                        column.last_updated = Date.now()
                    continue
                elif column.last_updated >= Date.now() - TOO_OLD:
                    continue
                try:
                    self._update_cardinality(column)
                    if DEBUG and not column.es_index.startswith(TEST_TABLE_PREFIX):
                        Log.note("updated {{column.name}}", column=column)
                except Exception as e:
                    Log.warning("problem getting cardinality for {{column.name}}", column=column, cause=e)
        except Exception as e:
            Log.warning("problem in cardinality monitor", cause=e)
def get_old_cset_revnum(self, revision):
    self.csets_todo_backwards.add((revision, True))

    revnum = None
    timeout = Till(seconds=BACKFILL_REVNUM_TIMEOUT)
    while not timeout:
        with self.conn.transaction() as t:
            revnum = self._get_one_revnum(t, revision)

        if revnum and revnum[0] >= 0:
            break
        elif revnum and revnum[0] < 0:
            Log.note("Waiting for table to recompute...")
        else:
            Log.note("Waiting for backfill to complete...")
        Till(seconds=CSET_BACKFILL_WAIT_TIME).wait()

    if timeout:
        Log.error(
            "Cannot find revision {{rev}} after waiting {{timeout}} seconds",
            rev=revision,
            timeout=BACKFILL_REVNUM_TIMEOUT
        )
    return revnum
def _wait_for_queue_space(self, timeout=DEFAULT_WAIT_TIME):
    """
    EXPECT THE self.lock TO BE HAD, WAITS FOR self.queue TO HAVE A LITTLE SPACE
    """
    wait_time = 5

    if DEBUG and len(self.queue) > 1 * 1000 * 1000:
        Log.warning("Queue {{name}} has over a million items")

    now = time()
    if timeout != None:
        time_to_stop_waiting = now + timeout
    else:
        time_to_stop_waiting = Null

    if self.next_warning < now:
        self.next_warning = now + wait_time

    while not self.please_stop and len(self.queue) >= self.max:
        if now > time_to_stop_waiting:
            if not _Log:
                _late_import()
            _Log.error(THREAD_TIMEOUT)

        if self.silent:
            self.lock.wait(Till(till=time_to_stop_waiting))
        else:
            self.lock.wait(Till(timeout=wait_time))

            if len(self.queue) >= self.max:
                now = time()
                if self.next_warning < now:
                    self.next_warning = now + wait_time
                    _Log.alert(
                        "Queue by name of {{name|quote}} is full with ({{num}} items), thread(s) have been waiting {{wait_time}} sec",
                        name=self.name,
                        num=len(self.queue),
                        wait_time=wait_time
                    )
def _delete_columns(self, please_stop):
    while not please_stop:
        result = self.delete_queue.pop(till=please_stop)
        if result == THREAD_STOP:
            break
        more_result = self.delete_queue.pop_all()
        results = [result] + more_result

        try:
            delete_result = self.es_index.delete_record({
                "bool": {
                    "should": [
                        {
                            "bool": {
                                "must": [
                                    {"term": {"es_index.~s~": es_index}},
                                    {"range": {"last_updated.~n~": {"lte": after.unix}}}
                                ]
                            }
                        }
                        for es_index, after in results
                    ]
                }
            })
            if DEBUG:
                query = {
                    "query": {"terms": {"es_index.~s~": [es_index for es_index, after in results]}}
                }
                verify = self.es_index.search(query)
                while verify.hits.total:
                    Log.note("wait for columns to be gone")
                    verify = self.es_index.search(query)
                Log.note(
                    "Deleted {{delete_result}} columns from {{table}}",
                    table=[es_index for es_index, after in results],
                    delete_result=delete_result.deleted
                )
        except Exception as cause:
            Log.warning("Problem with delete of table", cause=cause)
            Till(seconds=1).wait()
def test_till_in_loop(self):
    def loop(please_stop):
        counter = 0
        while not please_stop:
            (Till(seconds=0.001) | please_stop).wait()
            counter += 1
            Log.note("{{count}}", count=counter)

    please_stop = Signal("please_stop")
    Thread.run("loop", loop, please_stop=please_stop)
    Till(seconds=1).wait()
    with please_stop.lock:
        self.assertLessEqual(len(please_stop.job_queue), 1, "Expecting only one pending job on go")
    please_stop.go()
def _get_and_retry(self, url, branch, **kwargs):
    """
    requests 2.5.0 HTTPS IS A LITTLE UNSTABLE
    """
    kwargs = set_default(kwargs, {"timeout": self.timeout.seconds})
    try:
        output = _get_url(url, branch, **kwargs)
        return output
    except Exception as e:
        if UNKNOWN_PUSH in e:
            Log.error("Tried {{url}} and failed", {"url": url}, cause=e)

    try:
        (Till(seconds=5)).wait()
        return _get_url(url.replace("https://", "http://"), branch, **kwargs)
    except Exception as f:
        pass

    path = url.split("/")
    if path[3] == "l10n-central":
        # FROM https://hg.mozilla.org/l10n-central/tr/json-pushes?full=1&changeset=a6eeb28458fd
        # TO   https://hg.mozilla.org/mozilla-central/json-pushes?full=1&changeset=a6eeb28458fd
        path = path[0:3] + ["mozilla-central"] + path[5:]
        return self._get_and_retry("/".join(path), branch, **kwargs)
    elif len(path) > 5 and path[5] == "mozilla-aurora":
        # FROM https://hg.mozilla.org/releases/l10n/mozilla-aurora/pt-PT/json-pushes?full=1&changeset=b44a8c68fc60
        # TO   https://hg.mozilla.org/releases/mozilla-aurora/json-pushes?full=1&changeset=b44a8c68fc60
        path = path[0:4] + ["mozilla-aurora"] + path[7:]
        return self._get_and_retry("/".join(path), branch, **kwargs)
    elif len(path) > 5 and path[5] == "mozilla-beta":
        # FROM https://hg.mozilla.org/releases/l10n/mozilla-beta/lt/json-pushes?full=1&changeset=03fbf7556c94
        # TO   https://hg.mozilla.org/releases/mozilla-beta/json-pushes?full=1&changeset=b44a8c68fc60
        path = path[0:4] + ["mozilla-beta"] + path[7:]
        return self._get_and_retry("/".join(path), branch, **kwargs)
    elif len(path) > 7 and path[5] == "mozilla-release":
        # FROM https://hg.mozilla.org/releases/l10n/mozilla-release/en-GB/json-pushes?full=1&changeset=57f513ab03308adc7aa02cc2ea8d73fe56ae644b
        # TO   https://hg.mozilla.org/releases/mozilla-release/json-pushes?full=1&changeset=57f513ab03308adc7aa02cc2ea8d73fe56ae644b
        path = path[0:4] + ["mozilla-release"] + path[7:]
        return self._get_and_retry("/".join(path), branch, **kwargs)
    elif len(path) > 5 and path[4] == "autoland":
        # FROM https://hg.mozilla.org/build/autoland/json-pushes?full=1&changeset=3ccccf8e5036179a3178437cabc154b5e04b333d
        # TO   https://hg.mozilla.org/integration/autoland/json-pushes?full=1&changeset=3ccccf8e5036179a3178437cabc154b5e04b333d
        path = path[0:3] + ["try"] + path[5:]
        return self._get_and_retry("/".join(path), branch, **kwargs)

    Log.error("Tried {{url}} twice. Both failed.", {"url": url}, cause=[e, f])
def _update_from_es(self, please_stop):
    try:
        last_extract = Date.now()
        while not please_stop:
            now = Date.now()
            try:
                if (now - last_extract).seconds > COLUMN_EXTRACT_PERIOD:
                    result = self.es_index.search({
                        "query": {"range": {"last_updated.~n~": {"gte": self.last_load}}},
                        "sort": ["es_index.~s~", "name.~s~", "es_column.~s~"],
                        "from": 0,
                        "size": 10000,
                    })
                    last_extract = now

                    with self.locker:
                        for r in result.hits.hits._source:
                            c = doc_to_column(r)
                            if c:
                                self._add(c)
                                self.last_load = MAX((self.last_load, c.last_updated))

                while not please_stop:
                    updates = self.for_es_update.pop_all()
                    if not updates:
                        break

                    DEBUG and updates and Log.note("{{num}} columns to push to db", num=len(updates))
                    self.es_index.extend([{"value": column.__dict__()} for column in updates])
            except Exception as e:
                Log.warning("problem updating database", cause=e)

            (Till(seconds=COLUMN_LOAD_PERIOD) | please_stop).wait()
    finally:
        Log.note("done")
def run_pc_daemon(self, please_stop=None):
    while not please_stop:
        try:
            with self.total_locker:
                requested = self.total_files_requested
                if requested != 0:
                    mapped = self.total_tuids_mapped
                    Log.note(
                        "Percent complete {{mapped}}/{{requested}} = {{percent|percent(0)}}",
                        requested=requested,
                        mapped=mapped,
                        percent=mapped / requested
                    )
            (Till(seconds=DAEMON_WAIT_FOR_PC.seconds) | please_stop).wait()
        except Exception as e:
            Log.warning("Unexpected error in pc-daemon: {{cause}}", cause=e)
def test_or_signal_stop(self):
    acc = []

    def worker(this, please_stop):
        (Till(seconds=0.3) | please_stop).wait()
        this.assertTrue(not not please_stop, "Expecting to have the stop signal")
        acc.append("worker")

    w = Thread.run("worker", worker, self)
    Till(seconds=0.1).wait()
    w.stop()
    w.join()
    w.stopped.wait()
    acc.append("done")

    self.assertEqual(acc, ["worker", "done"])
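# ---------------------------------------------------------------------------
# ILLUSTRATIVE SKETCH (not part of the snippets above): a minimal, standalone
# example of the pattern these functions share, assuming the mo_threads and
# mo_logs libraries are available.  A worker loops while `please_stop` is not
# signalled, and each iteration blocks on (Till(seconds=...) | please_stop),
# so it wakes when either the interval elapses or a stop is requested.
# The names `example_daemon` and "example daemon" are hypothetical.
from mo_logs import Log
from mo_threads import Thread, Till


def example_daemon(please_stop):
    while not please_stop:
        Log.note("heartbeat")
        # WAKE EVERY SECOND, OR IMMEDIATELY WHEN please_stop GOES
        (Till(seconds=1) | please_stop).wait()


if __name__ == "__main__":
    worker = Thread.run("example daemon", example_daemon)
    Till(seconds=3).wait()
    worker.stop()   # SIGNAL please_stop
    worker.join()   # WAIT FOR THE WORKER TO EXIT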