def _make_arrangements(self,
                       network_middleware: RestMiddleware,
                       handpicked_ursulas: Optional[Iterable[Ursula]] = None,
                       timeout: int = 10,
                       ) -> Dict[Ursula, Arrangement]:
    """
    Pick some Ursula addresses and send them arrangement proposals.
    Returns a dictionary of Ursulas to Arrangements if it managed to get `n` responses.
    """
    if handpicked_ursulas is None:
        handpicked_ursulas = []
    handpicked_addresses = [ursula.checksum_address for ursula in handpicked_ursulas]

    reservoir = self._make_reservoir(handpicked_addresses)
    value_factory = PrefetchStrategy(reservoir, self.n)

    def worker(address):
        return self._propose_arrangement(address, network_middleware)

    self.alice.block_until_number_of_known_nodes_is(self.n, learn_on_this_thread=True, eager=True)

    worker_pool = WorkerPool(worker=worker,
                             value_factory=value_factory,
                             target_successes=self.n,
                             timeout=timeout,
                             stagger_timeout=1,
                             threadpool_size=self.n)
    worker_pool.start()
    try:
        successes = worker_pool.block_until_target_successes()
    except (WorkerPool.OutOfValues, WorkerPool.TimedOut):
        # It's possible to raise some other exceptions here, but we will use the logic below.
        successes = worker_pool.get_successes()
    finally:
        worker_pool.cancel()
        worker_pool.join()

    accepted_arrangements = {ursula: arrangement for ursula, arrangement in successes.values()}
    failures = worker_pool.get_failures()

    accepted_addresses = ", ".join(ursula.checksum_address for ursula in accepted_arrangements)

    if len(accepted_arrangements) < self.n:
        rejected_proposals = "\n".join(f"{address}: {value}"
                                       for address, (type_, value, traceback) in failures.items())
        self.log.debug(
            "Could not find enough Ursulas to accept proposals.\n"
            f"Accepted: {accepted_addresses}\n"
            f"Rejected:\n{rejected_proposals}")
        raise self._not_enough_ursulas_exception()
    else:
        self.log.debug(f"Finished proposing arrangements; accepted: {accepted_addresses}")

    return accepted_arrangements
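
# A minimal, self-contained sketch of the WorkerPool pattern used above, assuming only
# the WorkerPool API visible in this module (and the AllAtOnceFactory that appears
# further below). The worker and its input values are hypothetical; the point is the
# start / block / fall-back-to-partial-results / cleanup flow.

def _example_worker_pool_usage():
    def worker(value):
        # Hypothetical worker: any exception it raises is recorded as a failure.
        if value % 2:
            raise RuntimeError(f"no luck with {value}")
        return value * 10

    pool = WorkerPool(worker=worker,
                      value_factory=AllAtOnceFactory(list(range(10))),
                      target_successes=3,
                      timeout=5,
                      stagger_timeout=0,
                      threadpool_size=4)
    pool.start()
    try:
        # Blocks until 3 workers succeed, the values run out, or the timeout expires.
        successes = pool.block_until_target_successes()
    except (WorkerPool.OutOfValues, WorkerPool.TimedOut):
        # Fall back to whatever partial results were gathered, as the code above does.
        successes = pool.get_successes()
    finally:
        pool.cancel()
        pool.join()
    return successes
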
def _sample(self,
            network_middleware: RestMiddleware,
            ursulas: Optional[Iterable['Ursula']] = None,
            timeout: int = 10,
            ) -> List['Ursula']:
    """Send concurrent requests to the /ping HTTP endpoint of nodes drawn from the reservoir."""
    ursulas = ursulas or []
    handpicked_addresses = [ChecksumAddress(ursula.checksum_address) for ursula in ursulas]

    self.publisher.block_until_number_of_known_nodes_is(self.shares,
                                                        learn_on_this_thread=True,
                                                        eager=True)

    reservoir = self._make_reservoir(handpicked_addresses)
    value_factory = PrefetchStrategy(reservoir, self.shares)

    def worker(address) -> 'Ursula':
        return self._ping_node(address, network_middleware)

    worker_pool = WorkerPool(worker=worker,
                             value_factory=value_factory,
                             target_successes=self.shares,
                             timeout=timeout,
                             stagger_timeout=1,
                             threadpool_size=self.shares)
    worker_pool.start()
    try:
        successes = worker_pool.block_until_target_successes()
    except (WorkerPool.OutOfValues, WorkerPool.TimedOut):
        # It's possible to raise some other exceptions here, but we will use the logic below.
        successes = worker_pool.get_successes()
    finally:
        worker_pool.cancel()
        worker_pool.join()
    failures = worker_pool.get_failures()

    accepted_addresses = ", ".join(ursula.checksum_address for ursula in successes.values())

    if len(successes) < self.shares:
        rejections = "\n".join(f"{address}: {value}"
                               for address, (type_, value, traceback) in failures.items())
        message = ("Failed to contact enough sampled nodes.\n"
                   f"Selected:\n{accepted_addresses}\n"
                   f"Unavailable:\n{rejections}")
        self.log.debug(message)
        raise self.NotEnoughUrsulas(message)

    self.log.debug(f"Selected nodes for policy: {accepted_addresses}")
    return list(successes.values())
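
# A short sketch of the worker contract that _sample() assumes: a worker takes one value
# produced by the factory (here a node address), returns the contacted node on success,
# and signals failure by raising. The pool keys get_failures() by the input value and
# stores an exc_info-style (type, value, traceback) tuple, which is exactly what the
# rejection report above unpacks. Both helpers below are hypothetical stand-ins.

def example_ping_worker(address) -> 'Ursula':
    node = example_lookup_node(address)  # hypothetical: resolve the address to a known node
    example_http_ping(node)              # hypothetical: raises if the node is unreachable
    return node                          # on success, this becomes a value in get_successes()
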
class TreasureMapPublisher:

    log = Logger('TreasureMapPublisher')

    def __init__(self,
                 worker,
                 nodes,
                 percent_to_complete_before_release=5,
                 threadpool_size=120,
                 timeout=20):

        self._total = len(nodes)
        self._block_until_this_many_are_complete = math.ceil(
            len(nodes) * percent_to_complete_before_release / 100)
        self._worker_pool = WorkerPool(worker=worker,
                                       value_factory=AllAtOnceFactory(nodes),
                                       target_successes=self._block_until_this_many_are_complete,
                                       timeout=timeout,
                                       stagger_timeout=0,
                                       threadpool_size=threadpool_size)

    @property
    def completed(self):
        # TODO: lock dict before copying?
        return self._worker_pool.get_successes()

    def start(self):
        self.log.info("TreasureMapPublisher starting")
        self._worker_pool.start()
        if reactor.running:
            reactor.callInThread(self.block_until_complete)

    def block_until_success_is_reasonably_likely(self):
        # Note: `OutOfValues`/`TimedOut` may be raised here, which means we didn't even get to
        # `percent_to_complete_before_release` successes. For now just letting it fire.
        self._worker_pool.block_until_target_successes()
        completed = self.completed
        self.log.debug(f"The minimal number of nodes ({len(completed)}) was contacted "
                       "while blocking for treasure map publication.")
        return completed

    def block_until_complete(self):
        self._worker_pool.join()
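
# A usage sketch for TreasureMapPublisher, using only the constructor and methods defined
# above; `publish_to_node` and `nodes` are hypothetical stand-ins for the worker callable
# and node list supplied by the caller.

def example_publish(publish_to_node, nodes):
    publisher = TreasureMapPublisher(worker=publish_to_node,
                                     nodes=nodes,
                                     percent_to_complete_before_release=5,
                                     threadpool_size=120,
                                     timeout=20)
    publisher.start()
    # Returns once roughly 5% of the nodes have confirmed; publication to the remaining
    # nodes continues in the background (via reactor.callInThread when the reactor runs).
    early_successes = publisher.block_until_success_is_reasonably_likely()
    return early_successes
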
def test_batched_value_generation(join_worker_pool):
    """
    Tests a value factory that gives out value batches in portions.
    """
    outcomes, worker = generate_workers(
        [(OperatorRule(timeout_min=0.5, timeout_max=1.5), 80),
         (OperatorRule(fails=True, timeout_min=0.5, timeout_max=1.5), 80)],
        seed=123)

    factory = BatchFactory(list(outcomes))
    pool = WorkerPool(worker, factory,
                      target_successes=10,
                      timeout=10,
                      threadpool_size=10,
                      stagger_timeout=0.5)
    join_worker_pool(pool)

    t_start = time.monotonic()
    pool.start()
    successes = pool.block_until_target_successes()
    pool.cancel()
    pool.join()
    t_end = time.monotonic()

    assert len(successes) == 10

    # Check that batch sizes in the factory were getting progressively smaller
    # as the number of successes grew.
    assert all(factory.batch_sizes[i] >= factory.batch_sizes[i + 1]
               for i in range(len(factory.batch_sizes) - 1))

    # Since we canceled the pool, no more workers will be started and we will finish faster
    assert t_end - t_start < 4

    successes_copy = pool.get_successes()
    failures_copy = pool.get_failures()

    assert all(value in successes_copy for value in successes)
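
# The test relies on a BatchFactory defined elsewhere; the sketch below is an assumed
# reconstruction consistent with the assertions above. It presumes the value-factory
# protocol implied by this module: a callable that receives the current number of
# successes and returns the next batch of values, or None when exhausted. This one
# shrinks its batches as successes accumulate and records every batch size, which is
# what the monotonicity assertion inspects.

class BatchFactorySketch:

    def __init__(self, values, target_successes=10):
        self.values = list(values)
        self.target_successes = target_successes
        self.batch_sizes = []

    def __call__(self, successes):
        if successes >= self.target_successes or not self.values:
            return None
        # Request only as many values as successes still missing (capped by availability).
        batch_size = min(self.target_successes - successes, len(self.values))
        batch, self.values = self.values[:batch_size], self.values[batch_size:]
        self.batch_sizes.append(len(batch))
        return batch
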
def _enact_arrangements(self,
                        network_middleware: RestMiddleware,
                        arrangements: Dict[Ursula, Arrangement],
                        publication_transaction: Optional[HexBytes] = None,
                        publish_treasure_map: bool = True,
                        timeout: int = 10,
                        ):
    """
    Attempts to distribute kfrags to Ursulas that accepted arrangements earlier.
    """

    def worker(ursula_and_kfrag):
        ursula, kfrag = ursula_and_kfrag
        arrangement = arrangements[ursula]

        # TODO: seems like it would be enough to just encrypt this with Ursula's public key,
        # and not create a whole capsule.
        # Can't change for now since it's node protocol.
        payload = self._make_enactment_payload(publication_transaction, kfrag)
        message_kit, _signature = self.alice.encrypt_for(ursula, payload)

        try:
            # TODO: Concurrency
            response = network_middleware.enact_policy(ursula,
                                                       arrangement.id,
                                                       message_kit.to_bytes())
        except network_middleware.UnexpectedResponse as e:
            status = e.status
        else:
            status = response.status_code

        return status

    value_factory = AllAtOnceFactory(list(zip(arrangements, self.kfrags)))
    worker_pool = WorkerPool(worker=worker,
                             value_factory=value_factory,
                             target_successes=self.n,
                             timeout=timeout,
                             threadpool_size=self.n)

    worker_pool.start()

    # Block until everything is complete. We need all the workers to finish.
    worker_pool.join()

    successes = worker_pool.get_successes()
    if len(successes) != self.n:
        raise Policy.EnactmentError()

    # TODO: Enable re-tries?

    statuses = {ursula_and_kfrag[0].checksum_address: status
                for ursula_and_kfrag, status in successes.items()}
    if not all(status == 200 for status in statuses.values()):
        report = "\n".join(f"{address}: {status}" for address, status in statuses.items())
        self.log.debug(f"Policy enactment failed. Request statuses:\n{report}")

        # OK, let's check: if more than two Ursulas claimed we didn't pay,
        # we need to re-evaluate our situation here.
        number_of_claims_of_freeloading = sum(status == 402 for status in statuses.values())

        # TODO: a better exception here?
        if number_of_claims_of_freeloading > 2:
            raise self.alice.NotEnoughNodes

        # otherwise just raise a more generic error
        raise Policy.EnactmentError()
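
# For reference, a sketch of what the AllAtOnceFactory used above plausibly looks like
# under the same assumed value-factory protocol as the BatchFactory sketch earlier: it
# hands out its entire value list on the first call and signals exhaustion with None
# afterwards, so _enact_arrangements() queues every (ursula, kfrag) pair immediately.
# This is an assumption about its internals, not the library's confirmed implementation.

class AllAtOnceFactorySketch:

    def __init__(self, values):
        self.values = values
        self._produced = False

    def __call__(self, successes):
        if self._produced:
            return None
        self._produced = True
        return self.values
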