def test_execute() -> None:
    assert execute(threading.current_thread).result() != threading.current_thread()

    with pytest.raises(ZeroDivisionError):
        assert execute(lambda: 1 / 0).result()

    with pytest.raises(TimeoutError):
        assert execute(lambda: time.sleep(10), daemon=True).result(timeout=0)
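# This test assumes an ``execute`` helper that runs a callable on a dedicated
# thread and exposes its outcome as a ``concurrent.futures.Future``. Below is
# a minimal sketch of such a helper -- the name, the ``daemon`` flag, and the
# Future-based interface come from the test above; the body is an assumption,
# not necessarily the project's actual implementation.
import threading
from concurrent.futures import Future
from typing import Callable, TypeVar

T = TypeVar("T")


def execute(function: Callable[[], T], daemon: bool = True) -> "Future[T]":
    future: "Future[T]" = Future()

    def run() -> None:
        try:
            result = function()
        except Exception as error:
            # Surfaced to the caller when ``result()`` is invoked, which is
            # what the ``ZeroDivisionError`` assertion above relies on.
            future.set_exception(error)
        else:
            future.set_result(result)

    threading.Thread(target=run, daemon=daemon).start()
    return future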
def test_get_readthrough_set_wait(backend: Cache[bytes]) -> None:
    key = "key"

    def function() -> bytes:
        time.sleep(1)
        return f"{random.random()}".encode("utf-8")

    def worker() -> bytes:
        return backend.get_readthrough(key, function, noop, 10)

    setter = execute(worker)
    waiter = execute(worker)

    assert setter.result() == waiter.result()
def test_gauge_concurrent() -> None:
    backend = TestingMetricsBackend()

    name = "name"
    tags = {"tag": "value"}
    gauge = ThreadSafeGauge(backend, name, tags)

    workers = 4

    barrier = Barrier(workers)

    def waiter() -> None:
        with gauge:
            barrier.wait()

    wait([execute(waiter) for i in range(workers)])

    assert backend.calls == [
        GaugeCall(name, 0.0, tags),
        GaugeCall(name, 1.0, tags),
        GaugeCall(name, 2.0, tags),
        GaugeCall(name, 3.0, tags),
        GaugeCall(name, 4.0, tags),
        GaugeCall(name, 3.0, tags),
        GaugeCall(name, 2.0, tags),
        GaugeCall(name, 1.0, tags),
        GaugeCall(name, 0.0, tags),
    ]
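# ``ThreadSafeGauge`` is exercised above as a context manager that reports how
# many threads are currently inside the ``with`` block. A plausible sketch of
# that behavior, assuming the backend exposes a ``gauge(name, value, tags)``
# method -- the lock-guarded body is an assumption, not the actual class:
import threading
from typing import Any, Mapping, Optional


class ThreadSafeGauge:
    def __init__(
        self, backend: Any, name: str, tags: Optional[Mapping[str, str]] = None
    ) -> None:
        self.__backend = backend
        self.__name = name
        self.__tags = tags
        self.__lock = threading.Lock()
        self.__value = 0.0
        self.__report()  # the initial ``GaugeCall(name, 0.0, tags)``

    def __report(self) -> None:
        self.__backend.gauge(self.__name, self.__value, self.__tags)

    def __enter__(self) -> None:
        with self.__lock:
            self.__value += 1.0
            self.__report()

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        with self.__lock:
            self.__value -= 1.0
            self.__report()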
def __init__(
    self,
    consumer: Consumer[TPayload],
    commit_log_consumer: Consumer[Commit],
    commit_log_topic: Topic,
    commit_log_groups: Set[str],
) -> None:
    self.__consumer = consumer
    self.__commit_log_consumer = commit_log_consumer
    self.__commit_log_topic = commit_log_topic
    self.__commit_log_groups = commit_log_groups

    self.__remote_offsets: Synchronized[
        Mapping[str, MutableMapping[Partition, int]]
    ] = Synchronized({group: {} for group in commit_log_groups})

    self.__commit_log_worker_stop_requested = Event()
    self.__commit_log_worker = execute(self.__run_commit_log_worker)

    # The set of partitions that have been paused by the caller/user. This
    # takes precedence over whether or not the partition should be paused
    # due to offset synchronization.
    self.__paused: Set[Partition] = set()
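# ``Synchronized`` wraps the remote offset mapping so the commit log worker
# thread and the consumer thread can share it safely. A minimal sketch of such
# a wrapper, assuming lock-guarded access through a ``get()`` context manager
# (the real class may differ):
import threading
from contextlib import contextmanager
from typing import Generic, Iterator, TypeVar

T = TypeVar("T")


class Synchronized(Generic[T]):
    def __init__(self, value: T) -> None:
        self.__value = value
        self.__lock = threading.Lock()

    @contextmanager
    def get(self) -> Iterator[T]:
        # Callers hold the lock for the duration of the ``with`` block, so
        # reads and in-place mutations of the wrapped value cannot interleave.
        with self.__lock:
            yield self.__value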
def test_gauge_concurrent() -> None:
    backend = TestingMetricsBackend()

    name = "name"
    tags = {"tag": "value"}
    gauge = Gauge(backend, name, tags)

    event = Event()

    def waiter() -> None:
        with gauge:
            event.wait()

    futures = [execute(waiter) for i in range(4)]
    # Note: ``event.set()`` can race with workers still entering the gauge, so
    # the peak value of 4 is not guaranteed here; the ``Barrier``-based variant
    # above makes that ordering deterministic.
    event.set()
    wait(futures)

    assert backend.calls == [
        GaugeCall(name, 0, tags),
        GaugeCall(name, 1, tags),
        GaugeCall(name, 2, tags),
        GaugeCall(name, 3, tags),
        GaugeCall(name, 4, tags),
        GaugeCall(name, 3, tags),
        GaugeCall(name, 2, tags),
        GaugeCall(name, 1, tags),
        GaugeCall(name, 0, tags),
    ]
def __init__(self, configuration: Mapping[str, Any]) -> None:
    self.__configuration = configuration

    self.__producer = ConfluentProducer(configuration)
    self.__shutdown_requested = Event()

    # The worker must execute in a separate thread to ensure that callbacks
    # are fired -- otherwise trying to produce "synchronously" via
    # ``produce(...).result()`` could result in a deadlock.
    self.__result = execute(self.__worker)
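# The ``__worker`` method referenced above presumably drives the Confluent
# producer's delivery callbacks by polling until shutdown is requested. A
# hedged sketch of such a loop, as a method on the same class -- the body is
# an assumption, though ``Producer.poll`` and ``Producer.flush`` are real
# confluent-kafka APIs:
def __worker(self) -> None:
    # ``poll`` services delivery callbacks; without this loop, a caller
    # blocking on ``produce(...).result()`` would deadlock waiting for a
    # callback that never fires.
    while not self.__shutdown_requested.is_set():
        self.__producer.poll(0.1)
    # Drain anything still queued once shutdown has been requested.
    self.__producer.flush()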
def test_get_readthrough_set_wait_error(backend: Cache[bytes]) -> None:
    key = "key"

    class CustomException(Exception):
        pass

    def function() -> bytes:
        time.sleep(1)
        raise CustomException("error")

    def worker() -> bytes:
        return backend.get_readthrough(key, function, noop, 10)

    setter = execute(worker)
    waiter = execute(worker)

    with pytest.raises(CustomException):
        setter.result()

    with pytest.raises(ExecutionError):
        waiter.result()
def test_get_readthrough_set_wait_timeout(backend: Cache[bytes]) -> None:
    key = "key"
    value = b"value"

    def function() -> bytes:
        time.sleep(2.5)
        return value

    def worker(timeout: int) -> bytes:
        return backend.get_readthrough(key, function, noop, timeout)

    setter = execute(partial(worker, 2))
    waiter_fast = execute(partial(worker, 1))
    waiter_slow = execute(partial(worker, 3))

    with pytest.raises(TimeoutError):
        assert setter.result()

    with pytest.raises(TimeoutError):
        waiter_fast.result()

    with pytest.raises(ExecutionTimeoutError):
        waiter_slow.result()
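# Taken together, the three read-through tests pin down the cache's error
# contract: the first caller for a key (the "setter") runs ``function``
# itself, so its own exception or its own timeout is raised to it directly;
# concurrent callers (the "waiters") block on the setter's outcome. A waiter
# whose own deadline expires first sees ``TimeoutError``; a waiter that
# outlives the setter sees the setter's failure reflected back as
# ``ExecutionError`` (the setter raised) or ``ExecutionTimeoutError`` (the
# setter timed out, as with ``waiter_slow`` above).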