def test_wait_for_successes(join_worker_pool):
    """
    Checks that `block_until_target_successes()` returns in time and gives
    all the successes, if there were enough of them.
    """
    outcomes, worker = generate_workers(
        [
            (OperatorRule(timeout_min=0.5, timeout_max=1.5), 10),
            (OperatorRule(fails=True, timeout_min=1, timeout_max=3), 20),
        ],
        seed=123)

    factory = AllAtOnceFactory(list(outcomes))
    pool = WorkerPool(worker, factory, target_successes=10, timeout=10, threadpool_size=30)
    join_worker_pool(pool)

    started = time.monotonic()
    pool.start()
    successes = pool.block_until_target_successes()
    elapsed = time.monotonic() - started

    # Every recorded failure must correspond to a worker that was built to fail.
    failures = pool.get_failures()
    assert all(outcomes[value].fails for value in failures)

    assert len(successes) == 10

    # The pool has more threads than workers, so all the successful ones
    # should be able to finish right away.
    assert elapsed < 2

    # Should be able to do it several times.
    assert len(pool.block_until_target_successes()) == 10
def _make_arrangements(self,
                       network_middleware: RestMiddleware,
                       handpicked_ursulas: Optional[Iterable[Ursula]] = None,
                       timeout: int = 10,
                       ) -> Dict[Ursula, Arrangement]:
    """
    Pick some Ursula addresses and send them arrangement proposals.

    Returns a dictionary of Ursulas to Arrangements once `self.n` nodes
    have accepted; raises the not-enough-ursulas exception otherwise.
    """
    if handpicked_ursulas is None:
        handpicked_ursulas = []
    preselected_addresses = [ursula.checksum_address for ursula in handpicked_ursulas]

    reservoir = self._make_reservoir(preselected_addresses)
    proposal_targets = PrefetchStrategy(reservoir, self.n)

    def propose(address):
        return self._propose_arrangement(address, network_middleware)

    # Learning must have discovered enough nodes before sampling starts.
    self.alice.block_until_number_of_known_nodes_is(self.n,
                                                    learn_on_this_thread=True,
                                                    eager=True)

    pool = WorkerPool(worker=propose,
                      value_factory=proposal_targets,
                      target_successes=self.n,
                      timeout=timeout,
                      stagger_timeout=1,
                      threadpool_size=self.n)
    pool.start()
    try:
        successes = pool.block_until_target_successes()
    except (WorkerPool.OutOfValues, WorkerPool.TimedOut):
        # Other exceptions could surface here as well, but the shortfall
        # handling below treats them all the same way.
        successes = pool.get_successes()
    finally:
        pool.cancel()
        pool.join()

    # Each success value is a (ursula, arrangement) pair.
    accepted_arrangements = dict(successes.values())
    failures = pool.get_failures()

    accepted_addresses = ", ".join(ursula.checksum_address for ursula in accepted_arrangements)

    if len(accepted_arrangements) >= self.n:
        self.log.debug(f"Finished proposing arrangements; accepted: {accepted_addresses}")
        return accepted_arrangements

    rejected_proposals = "\n".join(f"{address}: {value}"
                                   for address, (type_, value, traceback) in failures.items())
    self.log.debug(
        "Could not find enough Ursulas to accept proposals.\n"
        f"Accepted: {accepted_addresses}\n"
        f"Rejected:\n{rejected_proposals}")
    raise self._not_enough_ursulas_exception()
def _sample(self,
            network_middleware: RestMiddleware,
            ursulas: Optional[Iterable['Ursula']] = None,
            timeout: int = 10,
            ) -> List['Ursula']:
    """Send concurrent requests to the /ping HTTP endpoint of nodes drawn from the reservoir."""
    handpicked_addresses = [
        ChecksumAddress(ursula.checksum_address) for ursula in (ursulas or [])
    ]

    # Learning must have discovered enough nodes before sampling starts.
    self.publisher.block_until_number_of_known_nodes_is(self.shares,
                                                        learn_on_this_thread=True,
                                                        eager=True)

    value_factory = PrefetchStrategy(self._make_reservoir(handpicked_addresses),
                                     self.shares)

    def ping(address) -> 'Ursula':
        return self._ping_node(address, network_middleware)

    pool = WorkerPool(worker=ping,
                      value_factory=value_factory,
                      target_successes=self.shares,
                      timeout=timeout,
                      stagger_timeout=1,
                      threadpool_size=self.shares)
    pool.start()
    try:
        successes = pool.block_until_target_successes()
    except (WorkerPool.OutOfValues, WorkerPool.TimedOut):
        # It's possible to raise some other exceptions here but we will use the logic below.
        successes = pool.get_successes()
    finally:
        pool.cancel()
        pool.join()

    failures = pool.get_failures()
    accepted_addresses = ", ".join(node.checksum_address for node in successes.values())

    if len(successes) < self.shares:
        rejections = "\n".join(f"{address}: {value}"
                               for address, (type_, value, traceback) in failures.items())
        message = "Failed to contact enough sampled nodes.\n"\
                  f"Selected:\n{accepted_addresses}\n" \
                  f"Unavailable:\n{rejections}"
        self.log.debug(message)
        raise self.NotEnoughUrsulas(message)

    self.log.debug(f"Selected nodes for policy: {accepted_addresses}")
    return list(successes.values())
def test_batched_value_generation(join_worker_pool):
    """
    Tests a value factory that gives out value batches in portions.
    """
    outcomes, worker = generate_workers(
        [
            (OperatorRule(timeout_min=0.5, timeout_max=1.5), 80),
            (OperatorRule(fails=True, timeout_min=0.5, timeout_max=1.5), 80),
        ],
        seed=123)

    factory = BatchFactory(list(outcomes))
    pool = WorkerPool(worker, factory,
                      target_successes=10,
                      timeout=10,
                      threadpool_size=10,
                      stagger_timeout=0.5)
    join_worker_pool(pool)

    started = time.monotonic()
    pool.start()
    successes = pool.block_until_target_successes()
    pool.cancel()
    pool.join()
    elapsed = time.monotonic() - started

    assert len(successes) == 10

    # Batch sizes handed out by the factory must be non-increasing
    # as the number of successes grew.
    sizes = factory.batch_sizes
    assert all(earlier >= later for earlier, later in zip(sizes, sizes[1:]))

    # Since we canceled the pool, no more workers will be started
    # and we will finish faster.
    assert elapsed < 4

    successes_snapshot = pool.get_successes()
    failures_snapshot = pool.get_failures()
    assert all(value in successes_snapshot for value in successes)