Example #1
def test_cancel_waiting_workers(join_worker_pool):
    """
    If we have a small pool and many workers, it is possible for workers to be enqueued
    one after another in one thread.
    We test that if we call `cancel()`, these enqueued workers are cancelled too.
    """

    outcomes, worker = generate_workers(
        [
            (WorkerRule(timeout_min=1, timeout_max=1), 100),
        ],
        seed=123)

    factory = AllAtOnceFactory(list(outcomes))
    pool = WorkerPool(worker,
                      factory,
                      target_successes=10,
                      timeout=10,
                      threadpool_size=10)
    join_worker_pool(pool)

    t_start = time.monotonic()
    pool.start()
    pool.block_until_target_successes()
    pool.cancel()
    pool.join()
    t_end = time.monotonic()

    # We have 10 threads in the pool and 100 workers that are all enqueued at once at the start.
    # If we didn't check for the cancel condition, we would have to wait for 10 seconds.
    # We get 10 successes after 1s and cancel the workers,
    # but the next workers in each thread have already started, so we have to wait for another 1s.
    assert t_end - t_start < 2.5
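
These tests all feed the pool through `AllAtOnceFactory`, a value factory that hands the whole batch of worker inputs to the pool at once. Below is a minimal sketch of such a factory, assuming (as the examples imply) that the pool calls it with the current number of successes and expects either a batch of new values or None; it is a reconstruction, not the library's actual implementation.

class AllAtOnceFactorySketch:
    """
    Hypothetical stand-in for AllAtOnceFactory, reconstructed from how the
    examples use it; not the library's actual implementation.
    """

    def __init__(self, values):
        self.values = list(values)
        self._produced = False

    def __call__(self, successes):
        if self._produced:
            return None  # nothing left; the pool stops enqueueing new workers
        self._produced = True
        return self.values  # hand out the entire batch on the first call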
Example #2
def test_join(join_worker_pool):
    """
    Test joining the pool.
    """

    outcomes, worker = generate_workers(
        [
            (WorkerRule(timeout_min=0.5, timeout_max=1.5), 9),
            (WorkerRule(fails=True, timeout_min=0.5, timeout_max=1.5), 20),
        ],
        seed=123)

    factory = AllAtOnceFactory(list(outcomes))
    pool = WorkerPool(worker,
                      factory,
                      target_successes=10,
                      timeout=1,
                      threadpool_size=30)
    join_worker_pool(pool)

    t_start = time.monotonic()
    pool.start()
    pool.join()
    t_end = time.monotonic()

    pool.join()  # should work the second time too

    # Even though timeout is 1, there are long-running workers which we can't interrupt.
    assert t_end - t_start < 3
Example #3
def test_wait_for_successes_timed_out(join_worker_pool):
    """
    Checks that we get an exception if not enough workers can succeed before the timeout.
    """

    outcomes, worker = generate_workers(
        [
            (WorkerRule(timeout_min=0, timeout_max=0.5), 9),
            (WorkerRule(timeout_min=1.5, timeout_max=2.5), 1),
            (WorkerRule(fails=True, timeout_min=1.5, timeout_max=2.5), 20),
        ],
        seed=123)

    factory = AllAtOnceFactory(list(outcomes))
    pool = WorkerPool(worker,
                      factory,
                      target_successes=10,
                      timeout=1,
                      threadpool_size=30)
    join_worker_pool(pool)

    t_start = time.monotonic()
    pool.start()
    with pytest.raises(WorkerPool.TimedOut):
        successes = pool.block_until_target_successes()
    t_end = time.monotonic()

    # Even though timeout is 1, there are long-running workers which we can't interrupt.
    assert t_end - t_start < 3
Example #4
def test_wait_for_successes_out_of_values(join_worker_pool):
    """
    Checks that if there weren't enough successful workers, `block_until_target_successes()`
    raises an exception when the value factory is exhausted.
    """

    outcomes, worker = generate_workers(
        [
            (WorkerRule(timeout_min=0.5, timeout_max=1.5), 9),
            (WorkerRule(fails=True, timeout_min=0.5, timeout_max=1.5), 20),
        ],
        seed=123)

    factory = AllAtOnceFactory(list(outcomes))
    pool = WorkerPool(worker,
                      factory,
                      target_successes=10,
                      timeout=10,
                      threadpool_size=15)
    join_worker_pool(pool)

    t_start = time.monotonic()
    pool.start()
    with pytest.raises(WorkerPool.OutOfValues):
        successes = pool.block_until_target_successes()
    t_end = time.monotonic()

    # We have roughly 2 workers per thread, so it shouldn't take longer than 2 * 1.5s (the maximum worker timeout).
    assert t_end - t_start < 4
Example #5
def test_wait_for_successes(join_worker_pool):
    """
    Checks that `block_until_target_successes()` returns in time and gives all the successes,
    if there were enough of them.
    """

    outcomes, worker = generate_workers(
        [
            (WorkerRule(timeout_min=0.5, timeout_max=1.5), 10),
            (WorkerRule(fails=True, timeout_min=1, timeout_max=3), 20),
        ],
        seed=123)

    factory = AllAtOnceFactory(list(outcomes))
    pool = WorkerPool(worker, factory, target_successes=10, timeout=10, threadpool_size=30)
    join_worker_pool(pool)

    t_start = time.monotonic()
    pool.start()
    successes = pool.block_until_target_successes()
    t_end = time.monotonic()

    failures = pool.get_failures()
    assert all(outcomes[value].fails for value in failures)

    assert len(successes) == 10

    # We have more threads in the pool than the workers,
    # so all the successful ones should be able to finish right away.
    assert t_end - t_start < 2

    # Should be able to do it several times
    successes = pool.block_until_target_successes()
    assert len(successes) == 10
Example #6
    def __init__(self,
                 worker,
                 nodes,
                 percent_to_complete_before_release=5,
                 threadpool_size=120,
                 timeout=20):

        self._total = len(nodes)
        self._block_until_this_many_are_complete = math.ceil(len(nodes) * percent_to_complete_before_release / 100)
        self._worker_pool = WorkerPool(worker=worker,
                                       value_factory=AllAtOnceFactory(nodes),
                                       target_successes=self._block_until_this_many_are_complete,
                                       timeout=timeout,
                                       stagger_timeout=0,
                                       threadpool_size=threadpool_size)
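
The release threshold above rounds up. For a hypothetical fleet of 30 nodes and the default 5 percent, the pool is released after the first 2 successes; the numbers below are purely illustrative.

import math

# Hypothetical numbers, just to illustrate the threshold computed above.
nodes_count = 30
percent_to_complete_before_release = 5
threshold = math.ceil(nodes_count * percent_to_complete_before_release / 100)
assert threshold == 2  # ceil(1.5) == 2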
Example #7
    def _enact_arrangements(
        self,
        network_middleware: RestMiddleware,
        arrangements: Dict[Ursula, Arrangement],
        publication_transaction: Optional[HexBytes] = None,
        publish_treasure_map: bool = True,
        timeout: int = 10,
    ):
        """
        Attempts to distribute kfrags to Ursulas that accepted arrangements earlier.
        """
        def worker(ursula_and_kfrag):
            ursula, kfrag = ursula_and_kfrag
            arrangement = arrangements[ursula]

            # TODO: seems like it would be enough to just encrypt this with Ursula's public key,
            # and not create a whole capsule.
            # Can't change for now since it's node protocol.
            payload = self._make_enactment_payload(publication_transaction,
                                                   kfrag)
            message_kit, _signature = self.alice.encrypt_for(ursula, payload)

            try:
                # TODO: Concurrency
                response = network_middleware.enact_policy(
                    ursula, arrangement.id, message_kit.to_bytes())
            except network_middleware.UnexpectedResponse as e:
                status = e.status
            else:
                status = response.status_code

            return status

        value_factory = AllAtOnceFactory(list(zip(arrangements, self.kfrags)))
        worker_pool = WorkerPool(worker=worker,
                                 value_factory=value_factory,
                                 target_successes=self.n,
                                 timeout=timeout,
                                 threadpool_size=self.n)

        worker_pool.start()

        # Block until everything is complete. We need all the workers to finish.
        worker_pool.join()

        successes = worker_pool.get_successes()

        if len(successes) != self.n:
            raise Policy.EnactmentError()

        # TODO: Enable re-tries?
        statuses = {
            ursula_and_kfrag[0].checksum_address: status
            for ursula_and_kfrag, status in successes.items()
        }
        if not all(status == 200 for status in statuses.values()):
            report = "\n".join(f"{address}: {status}"
                               for address, status in statuses.items())
            self.log.debug(
                f"Policy enactment failed. Request statuses:\n{report}")

            # OK, let's check: if more than two Ursulas claimed we didn't pay,
            # we need to re-evaluate our situation here.
            number_of_claims_of_freeloading = sum(
                status == 402 for status in statuses.values())

            # TODO: a better exception here?
            if number_of_claims_of_freeloading > 2:
                raise self.alice.NotEnoughNodes

            # otherwise just raise a more generic error
            raise Policy.EnactmentError()
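
Taken together, the examples follow one lifecycle: build the pool from a worker and a value factory, start it, block until the target number of successes (or join), then inspect the results. Below is a condensed sketch of that pattern with a trivial stand-in worker; the import path is an assumption, and the return shapes are inferred from the examples above.

# The import path below is an assumption about where these utilities live.
from nucypher.utilities.concurrency import AllAtOnceFactory, WorkerPool


def double(value):
    return value * 2  # trivial stand-in worker, for illustration only


pool = WorkerPool(worker=double,
                  value_factory=AllAtOnceFactory(list(range(20))),
                  target_successes=10,
                  timeout=10,
                  threadpool_size=5)
pool.start()
successes = pool.block_until_target_successes()  # {value: result, ...} (inferred)
pool.cancel()  # drop workers that are still queued
pool.join()    # wait for the workers that already started
failures = pool.get_failures()  # {value: exception, ...} (inferred)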