Example #1
async def http_serve(stream):
    wrapper = TrioHTTPWrapper(stream)
    wrapper.info("Got new connection")
    while True:
        assert wrapper.conn.states == {
            h11.CLIENT: h11.IDLE, h11.SERVER: h11.IDLE}

        try:
            with trio.fail_after(TIMEOUT):
                wrapper.info("Server main loop waiting for request")
                event = await wrapper.next_event()
                wrapper.info("Server main loop got event:", event)
                if type(event) is h11.Request:
                    await send_echo_response(wrapper, event)
        except Exception as exc:
            wrapper.info("Error during response handler:", exc)
            await maybe_send_error_response(wrapper, exc)

        if wrapper.conn.our_state is h11.MUST_CLOSE:
            wrapper.info("connection is not reusable, so shutting down")
            await wrapper.shutdown_and_clean_up()
            return
        else:
            try:
                wrapper.info("trying to re-use connection")
                wrapper.conn.start_next_cycle()
            except h11.ProtocolError:
                states = wrapper.conn.states
                wrapper.info("unexpected state", states, "-- bailing out")
                await maybe_send_error_response(
                    wrapper,
                    RuntimeError("unexpected state {}".format(states)))
                await wrapper.shutdown_and_clean_up()
                return
Example #2
@contextmanager
def assertTimeoutTrio(timeout: float) -> Generator[None, None, None]:
    """
    A context manager asserting that the code run in its block is still
    not finished after the given amount of time on the trio event loop.
    """
    with pytest.raises(trio.TooSlowError), trio.fail_after(timeout):
        yield
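A minimal usage sketch for the helper above, assuming it is applied as a @contextmanager inside a pytest-trio test (the test name and event are illustrative):

async def test_shutdown_never_completes_early():
    stop = trio.Event()
    with assertTimeoutTrio(0.1):
        await stop.wait()  # never set, so trio.fail_after(0.1) must fire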
Example #3
    async def start(self):
        self._tmp = Path(tempfile.mkdtemp())

        config = self._create_config(self._REDIS_PORT, self._MASTER_CONFIG, {
            'PORT': self._REDIS_PORT,
        })
        self._nodes.append(RedisNodeProcess(config.parent, self._REDIS_PORT))

        for n in range(1, self.replica_count + 1):
            port = self._REDIS_PORT + n
            config = self._create_config(port, self._REPLICA_CONFIG, {
                'PORT': port,
                'MASTER_PORT': self._REDIS_PORT,
            })
            self._nodes.append(RedisNodeProcess(config.parent, port))

        for n in range(self.sentinel_count):
            port = self._SENTINEL_PORT + n
            config = self._create_config(
                port, self._SENTINEL_CONFIG, {
                    'PORT': port,
                    'MASTER_PORT': self._REDIS_PORT,
                    'QUORUM': self.sentinel_count - 1,
                })
            self._sentinels.append(RedisSentinelProcess(config.parent, port))

        for node in self._nodes:
            await node.open()
        for sentinel in self._sentinels:
            await sentinel.open()

        # Wait until sentinel has discovered the replicas.
        # Otherwise automatic failover is not possible.
        sc = await self._sentinel_client()
        with trio.fail_after(self.MAX_SENTINEL_DISCOVERY_WAIT + 300.0):
            while True:
                actual_replicas = len(await sc.replicas('test_cluster'))
                actual_sentinels = len(await sc.sentinels('test_cluster'))
                expected_replicas = len(self._nodes) - 1
                expected_sentinels = len(self._sentinels) - 1
                if actual_replicas == expected_replicas \
                        and actual_sentinels == expected_sentinels:
                    # Give the sentinels some time to process stuff.
                    await trio.sleep(1.0)
                    break
                # Don't spam sentinel.
                await trio.sleep(0.1)
Example #4
async def test_alexandria_network_get_content_proof_api(
    alice,
    bob,
    alice_alexandria_network,
    bob_alexandria_client,
    content_size,
):
    content = ContentFactory(length=content_size)
    proof = compute_proof(content, sedes=content_sedes)

    async with bob_alexandria_client.subscribe(
            GetContentMessage) as subscription:
        async with trio.open_nursery() as nursery:

            async def _serve():
                request = await subscription.receive()
                if content_size > 1024:
                    partial = proof.to_partial(
                        request.message.payload.start_chunk_index * 32,
                        request.message.payload.max_chunks * 32,
                    )
                    payload = partial.serialize()
                    is_proof = True
                else:
                    payload = content
                    is_proof = False
                await bob_alexandria_client.send_content(
                    request.sender_node_id,
                    request.sender_endpoint,
                    is_proof=is_proof,
                    payload=payload,
                    request_id=request.request_id,
                )

            nursery.start_soon(_serve)

            with trio.fail_after(2):
                partial = await alice_alexandria_network.get_content_proof(
                    bob.node_id,
                    hash_tree_root=proof.get_hash_tree_root(),
                    content_key=b"test-content-key",
                    start_chunk_index=0,
                    max_chunks=16,
                )
                validate_proof(partial)
                partial_data = partial.get_proven_data()
                assert partial_data[0:16 * 32] == content[0:16 * 32]
Example #5
async def test_sync_with_concurrent_reencryption(running_backend, alice_core,
                                                 bob_user_fs, autojump_clock,
                                                 monkeypatch):
    # Create a shared workspace
    wid = await create_shared_workspace("w", bob_user_fs, alice_core)
    alice_workspace = alice_core.user_fs.get_workspace(wid)
    bob_workspace = bob_user_fs.get_workspace(wid)

    # Alice creates a file and lets it sync
    await alice_workspace.write_bytes("/test.txt", b"v1")
    await alice_core.wait_idle_monitors()
    await bob_user_fs.sync()

    # Freeze Alice's message processing so she won't process `sharing.reencrypted` messages
    allow_message_processing = trio.Event()

    async def _mockpoint_sleep():
        await allow_message_processing.wait()

    monkeypatch.setattr(
        "parsec.core.messages_monitor.freeze_messages_monitor_mockpoint",
        _mockpoint_sleep)

    # Now Bob reencrypts the workspace
    reencryption_job = await bob_user_fs.workspace_start_reencryption(wid)
    await bob_user_fs.process_last_messages()
    total, done = await reencryption_job.do_one_batch()
    assert total == done  # Sanity check to make sure the reencryption is finished

    # Alice modifies the workspace and tries to sync...
    await alice_workspace.write_bytes("/test.txt", b"v2")
    # Sync monitor will try and fail to do the sync of the workspace
    await trio.sleep(300)  # autojump, so not *really* 300s
    assert not alice_core.are_monitors_idle()

    # Now let Alice process the `sharing.reencrypted` messages; this should
    # allow the sync to complete
    allow_message_processing.set()
    with trio.fail_after(60):  # autojump, so not *really* 60s
        await alice_core.wait_idle_monitors()

    # Just make sure the sync is done
    await bob_workspace.sync()
    for workspace in (bob_workspace, alice_workspace):
        info = await workspace.path_info("/test.txt")
        assert not info["need_sync"]
        assert info["base_version"] == 3
Example #6
 async def _test_view_change_transitions_safely_without_quorum(self):
     for bft_config in bft.interesting_configs(lambda n, f, c: f >= 2):
         config = bft.TestConfig(n=bft_config['n'],
                                 f=bft_config['f'],
                                 c=bft_config['c'],
                                 num_clients=bft_config['num_clients'],
                                 key_file_prefix=KEY_FILE_PREFIX,
                                 start_replica_cmd=start_replica_cmd)
         with bft.BftTestNetwork(config) as bft_network:
             await bft_network.init()
             skvbc = kvbc.SimpleKVBCProtocol(bft_network)
             [bft_network.start_replica(i) for i in range(1, config.n - 1)]
             with trio.fail_after(60):  # seconds
                 async with trio.open_nursery() as nursery:
                     nursery.start_soon(
                         skvbc.send_indefinite_write_requests)
                     # See if replica 1 has become the new primary
                     await bft_network.wait_for_view_change(
                         replica_id=1, expected=lambda v: v == 1)
                     # At this point a view change has successfully completed
                     # with node 1 as the primary. The faulty assertion should
                     # have crashed old nodes.
                     #
                     # In case the nodes didn't crash, stop node 1 to trigger
                     # another view change. Starting node 0 should allow the view
                     # change to succeed. If there is a timeout then the other
                     # nodes have likely crashed due to the faulty assertion. The
                     # crash will show in the logs when running the test
                     # verbosely:
                     #
                     # 21: INFO 2019-08-30skvbc_replica:
                     # /home/andrewstone/concord-bft.py/bftengine/src/bftengine/PersistentStorageImp.cpp:881:
                     # void
                     # bftEngine::impl::PersistentStorageImp::verifySetDescriptorOfLastExitFromView(const
                     # bftEngine::impl::DescriptorOfLastExitFromView &):
                     # Assertion `false' failed.
                     bft_network.stop_replica(1)
                     bft_network.start_replica(0)
                     while True:
                         with trio.move_on_after(.5):  # seconds
                             key = ['replica', 'Gauges', 'lastAgreedView']
                             replica_id = 2
                             view = await bft_network.metrics.get(
                                 replica_id, *key)
                             if view == 2:
                                 # success!
                                 nursery.cancel_scope.cancel()
Example #7
    async def test_view_change_transitions_safely_without_quorum(
            self, bft_network, tracker):
        """
        Start up only N-2 out of N replicas and send client commands. This should
        trigger a successful view change attempt and hit the assert in issue
        #194.

        This is a regression test for
        https://github.com/vmware/concord-bft/issues/194.
        """
        [
            bft_network.start_replica(i)
            for i in range(1, bft_network.config.n - 1)
        ]
        with trio.fail_after(60):  # seconds
            async with trio.open_nursery() as nursery:
                nursery.start_soon(tracker.send_indefinite_tracked_ops)
                # See if replica 1 has become the new primary
                await bft_network.wait_for_view(replica_id=1,
                                                expected=lambda v: v == 1)
                # At this point a view change has successfully completed
                # with node 1 as the primary. The faulty assertion should
                # have crashed old nodes.
                #
                # In case the nodes didn't crash, stop node 1 to trigger
                # another view change. Starting node 0 should allow the view
                # change to succeed. If there is a timeout then the other
                # nodes have likely crashed due to the faulty assertion. The
                # crash will show in the logs when running the test
                # verbosely:
                #
                # 21: INFO 2019-08-30skvbc_replica:
                # /home/andrewstone/concord-bft.py/bftengine/src/bftengine/PersistentStorageImp.cpp:881:
                # void
                # bftEngine::impl::PersistentStorageImp::verifySetDescriptorOfLastExitFromView(const
                # bftEngine::impl::DescriptorOfLastExitFromView &):
                # Assertion `false' failed.
                bft_network.stop_replica(1)
                bft_network.start_replica(0)
                while True:
                    with trio.move_on_after(.5):  # seconds
                        key = ['replica', 'Gauges', 'lastAgreedView']
                        replica_id = 2
                        view = await bft_network.metrics.get(replica_id, *key)
                        if view == 2:
                            # success!
                            nursery.cancel_scope.cancel()
Example #8
    async def sendSync(self, msg, read_only, seq_num=None, cid=None, pre_process=False, m_of_n_quorum=None):
        """
        Send a client request and wait for an m_of_n_quorum of replies (if None, it defaults to a 2F+C+1 quorum).

        Return a single reply message if a quorum of replies matches.
        Otherwise, raise a trio.TooSlowError indicating the request timed out.

        Retry Strategy:
            If the request is a write and the primary is known then send only to
            the primary on the first attempt. Otherwise, if the request is read
            only or the primary is unknown, then send to all replicas on the
            first attempt.

            After `config.retry_timeout_milli` without receiving a quorum of
            identical replies, then clear the replies and send to all replicas.
            Continue this strategy every `retry_timeout_milli` until
            `config.req_timeout_milli` elapses. If `config.req_timeout_milli`
            elapses then a trio.TooSlowError is raised.

        Note that this method also binds the socket to an appropriate port if
        not already bound.
        """
        if not self.sock_bound:
            await self.bind()

        if seq_num is None:
            seq_num = self.req_seq_num.next()

        if cid is None:
            cid = str(seq_num)
        data = bft_msgs.pack_request(
            self.client_id, seq_num, read_only, self.config.req_timeout_milli, cid, msg, pre_process)

        if m_of_n_quorum is None:
            m_of_n_quorum = MofNQuorum.LinearizableQuorum(self.config, [r.id for r in self.replicas])

        # Raise a trio.TooSlowError exception if a quorum of replies isn't received in time
        try:
            with trio.fail_after(self.config.req_timeout_milli / 1000):
                self.reset_on_new_request()
                self.retries = 0
                return await self.send_loop(data, read_only, m_of_n_quorum)
        except trio.TooSlowError:
            print("TooSlowError thrown from client_id", self.client_id, "for seq_num", seq_num)
            raise
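The deadline pattern above, reduced to a hedged sketch: convert the configured milliseconds to seconds for trio.fail_after, and translate the framework timeout into a domain-specific failure (send_once and RequestTimeout are illustrative names, not part of the client above):

async def send_with_deadline(data, req_timeout_milli):
    try:
        with trio.fail_after(req_timeout_milli / 1000):
            return await send_once(data)  # hypothetical single send/receive attempt
    except trio.TooSlowError:
        # hypothetical domain error; `from None` hides the trio traceback
        raise RequestTimeout(f"no quorum within {req_timeout_milli}ms") from None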
Example #9
async def test_pool_get_session_by_endpoint(tester, initiator, pool, events):
    endpoint = EndpointFactory()

    # A: initiated locally, handshake incomplete
    remote_a = tester.node(endpoint=endpoint)
    session_a = pool.initiate_session(endpoint, remote_a.node_id)

    # B: initiated locally, handshake complete
    remote_b = tester.node(endpoint=endpoint)
    driver_b = tester.session_pair(initiator, remote_b,)
    with trio.fail_after(1):
        await driver_b.handshake()
    session_b = driver_b.initiator.session

    # C: initiated remotely, handshake incomplete
    session_c = pool.receive_session(endpoint)

    # D: initiated remotely, handshake complete
    remote_d = tester.node(endpoint=endpoint)
    driver_d = tester.session_pair(remote_d, initiator,)
    await driver_d.handshake()
    session_d = driver_d.recipient.session

    # Some other sessions with non-matching endpoints before handshake
    session_e = pool.receive_session(EndpointFactory())
    session_f = pool.initiate_session(EndpointFactory(), NodeIDFactory())

    # Some other sessions with non-matching endpoints after handshake
    driver_g = tester.session_pair(initiator,)
    await driver_g.handshake()
    session_g = driver_g.initiator.session

    driver_h = tester.session_pair(recipient=initiator,)
    await driver_h.handshake()
    session_h = driver_h.recipient.session

    endpoint_matches = pool.get_sessions_for_endpoint(endpoint)
    assert len(endpoint_matches) == 4
    assert session_a in endpoint_matches
    assert session_b in endpoint_matches
    assert session_c in endpoint_matches
    assert session_d in endpoint_matches

    assert session_e not in endpoint_matches
    assert session_f not in endpoint_matches
    assert session_g not in endpoint_matches
    assert session_h not in endpoint_matches
Example #10
    async def test_semi_manual_upgrade(self, bft_network):
        """
        Sends a wedge command and checks that the system stops processing new requests.
        Note that in this test we assume no failures and a synchronized network.
        The test does the following:
        1. A client sends a wedge command
        2. The client then sends a "Have you stopped" read-only command such that each replica answers "I have stopped"
        3. Apollo stops all replicas
        4. Apollo starts all replicas
        5. A client verifies that new write requests are being processed
        """
        bft_network.start_all_replicas()
        skvbc = kvbc.SimpleKVBCProtocol(bft_network)
        client = bft_network.random_client()

        key, val = await skvbc.write_known_kv()
        await client.write(
            skvbc.write_req([], [], block_id=0, wedge_command=True))

        with trio.fail_after(seconds=60):
            done = False
            while done is False:
                await trio.sleep(seconds=1)
                msg = skvbc.get_have_you_stopped_req()
                rep = await client.read(
                    msg,
                    m_of_n_quorum=bft_client.MofNQuorum.All(
                        client.config,
                        [r for r in range(bft_network.config.n)]))
                rsi_rep = client.get_rsi_replies()
                done = True
                for r in rsi_rep.values():
                    if skvbc.parse_rsi_reply(rep, r) == 0:
                        done = False
                        break

        await self.validate_stop_on_super_stable_checkpoint(bft_network, skvbc)

        bft_network.stop_all_replicas()

        # Here the system operator runs a manual upgrade
        # input("update the software and press any kay to continue")

        bft_network.start_all_replicas()

        await skvbc.assert_kv_write_executed(key, val)
        await skvbc.write_known_kv()
Example #11
async def test_peer_packer_sends_who_are_you(peer_packer,
                                             inbound_packet_channels,
                                             outbound_packet_channels,
                                             nursery):
    inbound_packet = InboundPacket(
        AuthTagPacketFactory(),
        EndpointFactory(),
    )

    inbound_packet_channels[0].send_nowait(inbound_packet)
    with trio.fail_after(0.5):
        outbound_packet = await outbound_packet_channels[1].receive()

    assert peer_packer.is_during_handshake
    assert outbound_packet.receiver_endpoint == inbound_packet.sender_endpoint
    assert isinstance(outbound_packet.packet, WhoAreYouPacket)
    assert outbound_packet.packet.token == inbound_packet.packet.auth_tag
Example #12
async def test_peer_packer_initiates_handshake(peer_packer,
                                               outbound_message_channels,
                                               outbound_packet_channels,
                                               nursery):
    outbound_message = OutboundMessage(
        PingMessageFactory(),
        EndpointFactory(),
        peer_packer.remote_node_id,
    )

    outbound_message_channels[0].send_nowait(outbound_message)
    with trio.fail_after(0.5):
        outbound_packet = await outbound_packet_channels[1].receive()

    assert peer_packer.is_during_handshake
    assert outbound_packet.receiver_endpoint == outbound_message.receiver_endpoint
    assert isinstance(outbound_packet.packet, AuthTagPacket)
Example #13
async def test_datagram_receiver(socket_pair):
    sending_socket, receiving_socket = socket_pair
    receiver_address = receiving_socket.getsockname()
    sender_address = sending_socket.getsockname()

    send_channel, receive_channel = trio.open_memory_channel(1)
    async with background_service(
            DatagramReceiver(receiving_socket, send_channel)):
        data = b"some packet"

        await sending_socket.sendto(data, receiver_address)
        with trio.fail_after(0.5):
            received_datagram = await receive_channel.receive()

        assert received_datagram.datagram == data
        assert received_datagram.sender.ip_address == sender_address[0]
        assert received_datagram.sender.port == sender_address[1]
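The same channel-plus-deadline idiom that the last few tests rely on, reduced to a self-contained sketch (the payload is illustrative):

send_channel, receive_channel = trio.open_memory_channel(1)
send_channel.send_nowait(b"some packet")
with trio.fail_after(0.5):
    # receive() would hang forever on a lost packet; the deadline turns
    # that hang into a crisp trio.TooSlowError test failure
    assert await receive_channel.receive() == b"some packet"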
Example #14
async def test_prune_cache(cache_and_workertype):
    # setup phase
    cache, worker_type = cache_and_workertype
    dead_worker = worker_type(0.3, bool, bool)
    await dead_worker.start()
    assert (await dead_worker.run_sync(_monkeypatch_max_timeout)).unwrap() is True
    with trio.fail_after(2):
        assert await dead_worker.wait() is not None
    live_worker = worker_type(math.inf, bool, bool)
    await live_worker.start()
    assert (await live_worker.run_sync(bool)).unwrap() is False
    # put dead worker into the cache on the left
    cache.extend(iter([dead_worker, live_worker]))
    cache.prune()
    assert live_worker in cache
    assert dead_worker not in cache
Example #15
    async def main():

        counts = Counter()

        async with tractor.open_nursery() as tn:
            p = await tn.start_actor(
                'inf_streamer',
                enable_modules=[__name__],
            )
            async with (
                p.open_context(inf_streamer) as (ctx, _),
                ctx.open_stream() as stream,
            ):

                async def pull_and_count(name: str):
                    # name = trio.lowlevel.current_task().name
                    async with stream.subscribe() as recver:
                        assert isinstance(
                            recver,
                            tractor.trionics.BroadcastReceiver
                        )
                        async for val in recver:
                            # print(f'{name}: {val}')
                            counts[name] += 1

                        print(f'{name} bcaster ended')

                    print(f'{name} completed')

                with trio.fail_after(3):
                    async with trio.open_nursery() as nurse:
                        for i in range(consumers):
                            nurse.start_soon(pull_and_count, i)

                        await trio.sleep(0.5)
                        print('\nterminating')
                        await stream.send('done')

            print('closed stream connection')

            assert len(counts) == consumers
            mx = max(counts.values())
            # make sure each task received all stream values
            assert all(val == mx for val in counts.values())

            await p.cancel_actor()
Example #16
@contextmanager
def _reql_timeout(seconds):
    '''
    Run a block with a timeout, raising `ReqlTimeoutError` if the block
    execution exceeds the timeout.

    :param float seconds: A timeout in seconds. If None, then no timeout is
        enforced.
    :raises ReqlTimeoutError: If execution time exceeds the timeout.
    '''
    if seconds is None:
        yield
    else:
        try:
            with trio.fail_after(seconds):
                yield
        except trio.TooSlowError:
            raise ReqlTimeoutError()
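A sketch of how the wrapper might be called, assuming the @contextmanager decorator above and an importable ReqlTimeoutError; the awaited call is a hypothetical driver operation:

with _reql_timeout(0.5):
    response = await connection.read_response()  # hypothetical call bounded by the timeout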
Example #17
async def test_repeated_event_wait(nursery, autojump_clock):
    done = trio.Event()
    event = RepeatedEvent()

    event.set()
    with pytest.raises(trio.TooSlowError):
        with trio.fail_after(1):
            await event.wait()

    @nursery.start_soon
    async def _listener():
        await event.wait()
        done.set()

    await wait_all_tasks_blocked()
    event.set()
    await done.wait()
Example #18
async def test_alexandria_client_send_pong(bob, bob_network,
                                           alice_alexandria_client):
    async with bob_network.dispatcher.subscribe(
            TalkResponseMessage) as subscription:
        await alice_alexandria_client.send_pong(
            bob.node_id,
            bob.endpoint,
            enr_seq=1234,
            advertisement_radius=4321,
            request_id=b"\x01\x02",
        )
        with trio.fail_after(1):
            talk_response = await subscription.receive()
        message = decode_message(talk_response.message.payload)
        assert isinstance(message, PongMessage)
        assert message.payload.enr_seq == 1234
        assert message.payload.advertisement_radius == 4321
Example #19
    async def _mallory_claim():
        async with backend_anonymous_cmds_factory(mallory.organization_addr) as cmds:
            rep = await cmds.user_get_invitation_creator(mallory.user_id)
            assert rep["trustchain"] == {"devices": [], "revoked_users": [], "users": []}
            creator = UserCertificateContent.unsecure_load(rep["user_certificate"])
            creator_device = DeviceCertificateContent.unsecure_load(rep["device_certificate"])
            assert creator_device.device_id.user_id == creator.user_id

            encrypted_claim = UserClaimContent(
                device_id=mallory.device_id,
                token=token,
                public_key=mallory.public_key,
                verify_key=mallory.verify_key,
            ).dump_and_encrypt_for(recipient_pubkey=creator.public_key)
            with trio.fail_after(1):
                rep = await cmds.user_claim(mallory.user_id, encrypted_claim)
                assert rep["status"] == "ok"
Example #20
    async def test_receive_challenges_add(self, nursery):
        """
        The receive_challenges process adds challenges with content to the
        challenge server's internal cache.
        """
        http01 = HTTP01Server.build()
        chal_tx, chal_rx = trio.open_memory_channel(0)

        with trio.fail_after(2):
            nursery.start_soon(http01.receive_challenges, chal_rx)
            await chal_tx.send(Challenge("id1", "content1"))
            await all_tasks_idle()
            assert http01.challenges == {"id1": "content1"}

            await chal_tx.send(Challenge("id2", "content2"))
            await all_tasks_idle()
            assert http01.challenges == {"id1": "content1", "id2": "content2"}
Example #21
 async def do_get_locations(
         node: Node, send_channel: trio.abc.SendChannel[Node]) -> None:
     async with send_channel:
         try:
             with trio.fail_after(LOCATE_TIMEOUT):
                 locations = await self.locate(node, key=key)
         except trio.TooSlowError:
             self.logger.debug(
                 "Timeout getting locations: node=%s  key=%r",
                 node,
                 key,
             )
         else:
             for location in locations:
                 if location.node_id == self.client.local_node_id:
                     continue
                 await send_channel.send(location)
Example #22
async def test_repeated_event_repeat_last(autojump_clock):
    event = RepeatedEvent()

    # no event was set, repeat_last=True will still iterate immediately
    async for _ in event.events(repeat_last=True):
        break

    # set between listener sessions is missed
    event.set()
    with pytest.raises(trio.TooSlowError):
        with trio.fail_after(1):
            async for _ in event.events():
                break

    # repeat_last=True will still iterate immediately
    async for _ in event.events(repeat_last=True):
        break
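A consumer sketch built on the semantics this test demonstrates (RepeatedEvent as above; refresh() is a hypothetical callback):

async def watch(event):
    # repeat_last=True yields once immediately, then once per later set(),
    # so a late-joining listener never misses the current state
    async for _ in event.events(repeat_last=True):
        await refresh()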
Example #23
async def test_run():
    checkpoint = trio.lowlevel.checkpoint

    async def raise_err():
        await checkpoint()
        raise ValueError("Some err")

    async def return_arg(arg):
        await checkpoint()
        return arg

    with trio.fail_after(2):
        async with open_worker_pool(2) as pool:
            assert 7 == await pool.run(return_arg, 7)
            with pytest.raises(ValueError, match="Some err"):
                await pool.run(raise_err)
            assert 9 == await pool.run(return_arg, 9)
Example #24
async def main():
    with trio.move_on_after(3):
        print("Went to sleep")
        await trio.sleep(2)
        print("Still sleeping")
        await trio.sleep(2)
        print("Woke up on my own")
    print("Nap time is over")

    try:
        with trio.fail_after(2):
            async with trio.open_nursery() as n:
                n.start_soon(trio.sleep, 1)
                n.start_soon(trio.sleep, 3)
                await trio.sleep(4)
    except trio.TooSlowError as e:
        print(f"Took too long: {e!r}")
Example #25
    async def write_batch(self, msg_batch, batch_seq_nums=None, m_of_n_quorum=None, corrupt_params=None, no_retries=False):
        if not self.comm_prepared:
            await self._comm_prepare()

        cid = str(self.req_seq_num.next())
        batch_size = len(msg_batch)

        if batch_seq_nums is None:
            batch_seq_nums = []
            for n in range(batch_size):
                batch_seq_nums.append(self.req_seq_num.next())

        msg_data = b''
        req_index_to_corrupt = random.randint(1, batch_size-1) # don't corrupt the 1st
        for n in range(batch_size):
            msg = msg_batch[n]
            msg_seq_num = batch_seq_nums[n]
            msg_cid = str(msg_seq_num)

            signature = b''
            client_id = self.client_id
            if self.signing_key:
                h = SHA256.new(msg)
                signature = pkcs1_15.new(self.signing_key).sign(h)
                if corrupt_params and (req_index_to_corrupt == n):
                    msg, signature, client_id = self._corrupt_signing_params(msg, signature, client_id, corrupt_params)

            msg_data = b''.join([msg_data, bft_msgs.pack_request(
                self.client_id, msg_seq_num, False, self.config.req_timeout_milli, msg_cid, msg, 0, True,
                reconfiguration=False, span_context=b'', signature=signature)])

        data = bft_msgs.pack_batch_request(self.client_id, batch_size, msg_data, cid)

        if m_of_n_quorum is None:
            m_of_n_quorum = MofNQuorum.LinearizableQuorum(self.config, [r.id for r in self.replicas])

        # Raise a trio.TooSlowError exception if a quorum of replies isn't received in time
        try:
            with trio.fail_after(batch_size * self.config.req_timeout_milli / 1000):
                self._reset_on_new_request(batch_seq_nums)
                return await self._send_receive_loop(data, False, m_of_n_quorum,
                    batch_size * self.config.retry_timeout_milli / 1000, no_retries=no_retries)
        except trio.TooSlowError:
            raise trio.TooSlowError(f"client_id {self.client_id}, for batch msg {cid} {batch_seq_nums}")
Example #26
    async def start_tls(self, hostname: bytes, ssl_context: SSLContext,
                        timeout: TimeoutDict) -> "SocketStream":
        connect_timeout = none_as_inf(timeout.get("connect"))
        exc_map = {
            trio.TooSlowError: ConnectTimeout,
            trio.BrokenResourceError: ConnectError,
        }
        ssl_stream = trio.SSLStream(
            self.stream,
            ssl_context=ssl_context,
            server_hostname=hostname.decode("ascii"),
        )

        with map_exceptions(exc_map):
            with trio.fail_after(connect_timeout):
                await ssl_stream.do_handshake()
            return SocketStream(ssl_stream)
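A sketch of the none_as_inf helper assumed above: trio deadlines accept float("inf"), so a missing timeout maps naturally to "never expire".

def none_as_inf(value):
    # trio.fail_after(float("inf")) never fires, matching "no timeout"
    return float("inf") if value is None else value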
Example #27
async def test_trio_service_manager_run_task_waits_for_task_completion():
    task_event = trio.Event()

    @as_service
    async def RunTaskService(manager):
        async def task_fn():
            await trio.sleep(0.01)
            task_event.set()

        manager.run_task(task_fn)
        # The task is set to run in the background but then the service exits.
        # We want to be sure that the task is allowed to continue to
        # completion unless explicitly cancelled.

    async with background_service(RunTaskService()):
        with trio.fail_after(0.1):
            await task_event.wait()
Example #28
    async def wait_for(self, predicate, timeout, interval):
        """
        Wait for the given async predicate function to return true. Give up
        waiting for a single predicate call after interval (seconds) and retry
        until timeout (seconds) expires. Raise trio.TooSlowError when the timeout expires.

        Important:
         * The given predicate function must be async
         * Retries may occur more frequently than interval if the predicate
           returns false before interval expires. This only matters in that it
           uses more CPU.
        """
        with trio.fail_after(timeout):
            while True:
                with trio.move_on_after(interval):
                    if await predicate():
                        return
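A usage sketch under the contract above (the harness object and the async predicate are illustrative):

async def view_is_stable():
    return await read_view_metric() == expected_view  # hypothetical metric read

await harness.wait_for(view_is_stable, timeout=60, interval=1)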
Example #29
async def test_request_handling(message_dispatcher, incoming_message_channels,
                                remote_enr, remote_endpoint):
    ping_send_channel, ping_receive_channel = trio.open_memory_channel(0)

    async with message_dispatcher.add_request_handler(
            PingMessage) as request_subscription:

        incoming_message = IncomingMessage(
            message=PingMessageFactory(),
            sender_endpoint=remote_endpoint,
            sender_node_id=remote_enr.node_id,
        )
        await incoming_message_channels[0].send(incoming_message)

        with trio.fail_after(1):
            handled_incoming_message = await request_subscription.receive()
        assert handled_incoming_message == incoming_message
Example #30
 async def run_check(self, address: str) -> None:  # {{{
     fmt = {"address": address}
     cmdline = [x % fmt for x in self.check]
     logprefix = "%s,%s" % (self.logprefix, address)
     try:
         self.logger.debug(logprefix, "launching %r" % (cmdline, ))
         with trio.fail_after(self.timeout):
             result = await trio.run_process(cmdline, capture_stdout=True)
         outu = result.stdout.decode('utf-8', 'ignore')
         #FIXME: propagate some (sort of) error string? E.g. to prometheus/victoriadb?
         if result.returncode != 0:
             self.logger.debug(
                 logprefix,
                 "destination failed rc:%d" % (result.returncode, ))
             self.results[address] = HealthCheckResult(result.returncode)
         elif self.regex.search(outu):
             if self.prio_regex is not None:
                 m = self.prio_regex.search(outu)
                 if m:
                     try:
                         prio = int(cast(str, m.group(1)))
                     except ValueError:
                         prio = len(cast(str, m.group(1))) + 1
                     self.logger.debug(
                         logprefix,
                         "destination OK with priority %d" % (prio, ))
                 else:
                     prio = -1
                     self.logger.debug(
                         logprefix,
                         "destination OK, priority not matched (disabling by prio: -1)"
                     )
                 self.results[address] = HealthCheckResult(
                     result.returncode, prio)
             else:
                 self.logger.debug(logprefix, "destination OK")
                 self.results[address] = HealthCheckResult(
                     result.returncode, 1)
         else:
             self.logger.debug(logprefix,
                               "destination failed: regex not matched")
             self.results[address] = HealthCheckResult(
                 HealthCheckResult.NOT_MATCHED)
     except Exception as e:
         self.logger.warning(logprefix, "check error: %s" % (e, ))
Example #31
async def test_client_send_graph_delete(alice_and_bob_clients):
    alice, bob = alice_and_bob_clients

    async with bob.message_dispatcher.subscribe(GraphDelete) as subscription:
        request_id = await alice.send_graph_delete(
            bob.local_node,
            key=content_key_to_graph_key(b'key'),
        )

        with trio.fail_after(1):
            message = await subscription.receive()

        assert message.node == alice.local_node
        payload = message.payload
        assert isinstance(payload, GraphDelete)
        assert payload.request_id == request_id
        assert payload.key == b'key'
Example #32
async def test_client_send_found_nodes(alice_and_bob_clients):
    alice, bob = alice_and_bob_clients

    async with bob.message_dispatcher.subscribe(FoundNodes) as subscription:
        total_messages = await alice.send_found_nodes(
            bob.local_node,
            request_id=1234,
            found_nodes=(),
        )

        with trio.fail_after(1):
            message = await subscription.receive()

        assert message.node == alice.local_node
        payload = message.payload
        assert isinstance(payload, FoundNodes)
        assert payload.total == total_messages
Example #33
async def test_alert_durable1():
    async def check_durable(ev, task_status=trio.TASK_STATUS_IGNORED):
        def alert_me(x):
            return 2 * x

        async with unit(1) as unit1:
            await unit1.register(
                alert_me, "my.dur.alert1", call_conv=CC_DATA, durable=True, ttl=1, multiple=True
            )
        task_status.started()
        await trio.sleep(TIMEOUT / 2)
        async with unit(1) as unit1:
            await unit1.register(
                alert_me, "my.dur.alert1", call_conv=CC_DATA, durable=True, ttl=1, multiple=True
            )
            await ev.wait()

    ev = trio.Event()
    async with unit(2) as unit2:
        await unit2.nursery.start(check_durable, ev)
        with trio.fail_after(TIMEOUT):
            res = await unit2.poll_first("my.dur.alert1", 123)
        assert res == 246
        ev.set()