def wrapper():
    """Yield the cached constant, recomputing it via fn.asynq() when stale.

    The cached value and its refresh timestamp live as attributes on the
    wrapper itself; a refresh happens on first use (timestamp == 0) or
    once the TTL (when non-zero) has elapsed.
    """
    last_refresh = wrapper.alazy_constant_refresh_time
    never_computed = last_refresh == 0
    ttl_expired = ttl != 0 and last_refresh < utime() - ttl
    if never_computed or ttl_expired:
        wrapper.alazy_constant_cached_value = yield fn.asynq()
        wrapper.alazy_constant_refresh_time = utime()
    result(wrapper.alazy_constant_cached_value)
    return
def test_time_offset():
    """TimeOffset should shift qcore.utime() inside the context only."""
    before = qcore.utime()
    with TimeOffset(qcore.HOUR):
        during = qcore.utime()
    after = qcore.utime()
    # Outside the context the clock is back to normal.
    assert_eq(before, after, tolerance=qcore.MINUTE)
    # Inside the context the clock was shifted well beyond a minute.
    assert_ne(before, during, tolerance=qcore.MINUTE)
    assert_le(after, during)
def _flush_batch(self, batch):
    """Flush a batch between its before/after hooks, timing it when enabled.

    The after-flush hook always runs, even if flush() raises.
    """
    self.on_before_batch_flush(batch)
    try:
        if not _debug_options.COLLECT_PERF_STATS:
            batch.flush()
        else:
            started_at = utime()
            batch.flush()
            batch.dump_perf_stats(utime() - started_at)
    finally:
        self.on_after_batch_flush(batch)
    return 0
def _continue_with_task(self, task):
    """Run one continuation step of a suspended task.

    Restores the task's saved contexts, marks it as this scheduler's
    active task while it runs, optionally times the step, and resets
    the task's dependency-scheduled flag afterwards.
    """
    task._resume_contexts()
    self.active_task = task
    if _debug_options.DUMP_CONTINUE_TASK:
        debug.write('@async: -> continuing %s' % debug.str(task))
    if _debug_options.COLLECT_PERF_STATS:
        # Accumulate wall time spent in this continuation step.
        start = utime()
        task._continue()
        task._total_time += utime() - start
        # Dump stats only once the task has fully computed its result.
        if task.is_computed() and isinstance(task, AsyncTask):
            task.dump_perf_stats()
    else:
        task._continue()
    if _debug_options.DUMP_CONTINUE_TASK:
        debug.write('@async: <- continued %s' % debug.str(task))
    self.active_task = None
    # We get a new set of dependencies when we run _continue, so these haven't
    # been scheduled.
    task._dependencies_scheduled = False
def ttuple(symbols: SymbolSequence, threshold=35) -> TestResult:
    """Estimate min-entropy per symbol with the t-Tuple test (NIST SP 800-90B).

    Args:
        symbols: sequence of symbols sampled from the entropy source.
        threshold: minimum occurrence count for a tuple length to count.

    Returns:
        A TestResult carrying the min-entropy-per-symbol estimate.

    Raises:
        CannotCompute: if no tuple length reaches `threshold` occurrences.
    """
    L = len(symbols)
    # Step 1: find the largest t such that the most common t-tuple occurs
    # at least `threshold` times.
    t = None
    p_max = -math.inf
    for i in range(1, L):
        # Step 2: Q_i := occurrences of the most common i-tuple.
        _, Q_i = Counter(
            tuple(symbols[ndx:ndx + i]) for ndx in range(L - i + 1)
        ).most_common(1)[0]
        if Q_i < threshold:
            # The max i-tuple count is non-increasing in i (every occurrence
            # of an (i+1)-tuple contains an occurrence of its i-prefix), so
            # no longer tuple can reach the threshold: stop early.
            break
        t = i
        # Step 3: estimate the maximum individual t-tuple probability.
        # This is a "sample" probability (we never see the full source
        # of any entropy data).
        P_i = (Q_i / (L - i + 1)) ** (1 / i)
        p_max = max(p_max, P_i)
    if t is None:
        raise CannotCompute(f"Couldn't find t-tuple for threshold={threshold}")
    # Step 4: pu := upper bound (99% confidence) on the most common
    # t-tuple probability.
    pu = min(1.0, p_max + 2.576 * math.sqrt(p_max * (1.0 - p_max) / (L - 1.0)))
    min_entropy_per_symbol = -math.log(pu, bitwidth_of_symbols(symbols))
    return TestResult(False, None, min_entropy_per_symbol)
def pause(self):
    """Stop timing: fold the interval since the last resume into total_time."""
    elapsed = utime() - self._last_start_time
    self.total_time += elapsed
def resume(self):
    """Restart timing: mark now as the start of a new timed interval."""
    now = utime()
    self._last_start_time = now