Example #1
async def test_does_not_propagate_invalid_tx(event_bus,
                                             chain_with_block_validation,
                                             tx_validator):

    initial_two_peers = TEST_NODES[:2]
    node_one = initial_two_peers[0]
    node_two = initial_two_peers[1]

    async with run_proxy_peer_pool(event_bus) as peer_pool:
        outgoing_tx = observe_outgoing_transactions(event_bus)
        tx_pool = TxPool(event_bus, peer_pool, tx_validator)
        asyncio.ensure_future(tx_pool.run())

        # Answer the tx pool's query for connected peers with the two test nodes.
        run_mock_request_response(GetConnectedPeersRequest,
                                  GetConnectedPeersResponse(initial_two_peers),
                                  event_bus)

        await asyncio.sleep(0.01)

        txs_broadcasted_by_peer1 = [
            create_random_tx(chain_with_block_validation, is_valid=False),
            create_random_tx(chain_with_block_validation)
        ]

        # Peer1 sends some txs
        await event_bus.broadcast(
            TransactionsEvent(session=node_one,
                              msg=txs_broadcasted_by_peer1,
                              cmd=Transactions))
        await asyncio.sleep(0.01)

        # Check that Peer2 received only the second tx which is valid
        assert outgoing_tx == [
            (node_two, (txs_broadcasted_by_peer1[1], )),
        ]
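The `create_random_tx` fixture used above is not shown on this page. Below is a rough sketch of what such a helper might look like, matching the `(chain, private_key, is_valid)` signature of the later examples (Example #1 shows an older two-argument form). The py-evm calls (`create_unsigned_transaction`, `as_signed_transaction`) are real; how `is_valid=False` is made to fail validation is an assumption.

import os

def create_random_tx(chain, private_key, is_valid=True):
    # Sketch only: build and sign a throwaway transaction.
    tx = chain.create_unsigned_transaction(
        nonce=0,
        gas_price=1234,
        # Assumption: a gas limit below the 21000 intrinsic cost should
        # be rejected by the validator.
        gas=21000 if is_valid else 0,
        to=os.urandom(20),
        value=1,
        data=os.urandom(8),
    )
    return tx.as_signed_transaction(private_key)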
Example #2
    def do_start(self) -> None:

        trinity_config = self.boot_info.trinity_config
        db = DBClient.connect(trinity_config.database_ipc_path)

        app_config = trinity_config.get_app_config(Eth1AppConfig)
        chain_config = app_config.get_chain_config()

        chain = chain_config.full_chain_class(db)

        # Pick the Byzantium fork block for the configured network.
        if self.boot_info.trinity_config.network_id == MAINNET_NETWORK_ID:
            validator = DefaultTransactionValidator(chain,
                                                    BYZANTIUM_MAINNET_BLOCK)
        elif self.boot_info.trinity_config.network_id == ROPSTEN_NETWORK_ID:
            validator = DefaultTransactionValidator(chain,
                                                    BYZANTIUM_ROPSTEN_BLOCK)
        else:
            raise ValueError(
                "The TxPool component only supports MainnetChain or RopstenChain"
            )

        proxy_peer_pool = ETHProxyPeerPool(self.event_bus,
                                           TO_NETWORKING_BROADCAST_CONFIG)

        self.tx_pool = TxPool(self.event_bus, proxy_peer_pool, validator)
        asyncio.ensure_future(
            exit_with_services(self.tx_pool, self._event_bus_service))
        asyncio.ensure_future(self.tx_pool.run())
Example #3
async def two_connected_tx_pools(event_bus, other_event_bus, event_loop,
                                 funded_address_private_key,
                                 chain_with_block_validation, tx_validator,
                                 client_and_server):

    alice_event_bus = event_bus
    bob_event_bus = other_event_bus
    bob, alice = client_and_server

    bob_peer_pool = MockPeerPoolWithConnectedPeers([bob],
                                                   event_bus=bob_event_bus)
    alice_peer_pool = MockPeerPoolWithConnectedPeers([alice],
                                                     event_bus=alice_event_bus)

    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(
            run_peer_pool_event_server(bob_event_bus,
                                       bob_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        await stack.enter_async_context(
            run_peer_pool_event_server(alice_event_bus,
                                       alice_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        bob_proxy_peer_pool = ETHProxyPeerPool(bob_event_bus,
                                               TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(bob_proxy_peer_pool))

        alice_proxy_peer_pool = ETHProxyPeerPool(
            alice_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(alice_proxy_peer_pool))

        alice_tx_pool = TxPool(
            alice_event_bus,
            alice_proxy_peer_pool,
            tx_validator,
        )
        await stack.enter_async_context(
            background_asyncio_service(alice_tx_pool))

        bob_tx_pool = TxPool(
            bob_event_bus,
            bob_proxy_peer_pool,
            tx_validator,
        )
        await stack.enter_async_context(
            background_asyncio_service(bob_tx_pool))

        yield (
            (alice, alice_event_bus, alice_tx_pool),
            (bob, bob_event_bus, bob_tx_pool),
        )
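Example #3 leans on `contextlib.AsyncExitStack` to enter six async context managers without six nested `async with` blocks. The pattern in isolation, with toy context-manager arguments:

import contextlib

async def use_both(cm_a, cm_b):
    async with contextlib.AsyncExitStack() as stack:
        a = await stack.enter_async_context(cm_a)
        b = await stack.enter_async_context(cm_b)
        # ... use a and b; both contexts are unwound, in reverse
        # order, when the stack block exits.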
Example #4
async def test_does_not_propagate_invalid_tx(event_bus,
                                             funded_address_private_key,
                                             chain_with_block_validation,
                                             tx_validator):

    initial_two_peers = TEST_NODES[:2]
    node_one = initial_two_peers[0]
    node_two = initial_two_peers[1]

    async with run_proxy_peer_pool(event_bus) as peer_pool:
        tx_pool = TxPool(event_bus, peer_pool, tx_validator)
        async with run_service(tx_pool):
            run_mock_request_response(
                GetConnectedPeersRequest, GetConnectedPeersResponse(initial_two_peers), event_bus)

            await asyncio.sleep(0.01)

            txs_broadcasted_by_peer1 = [
                create_random_tx(chain_with_block_validation, funded_address_private_key, is_valid=False),  # noqa: E501
                create_random_tx(chain_with_block_validation, funded_address_private_key)
            ]

            outgoing_tx, got_txns = observe_outgoing_transactions(event_bus)

            # Peer1 sends some txs
            await event_bus.broadcast(
                TransactionsEvent(session=node_one, command=Transactions(txs_broadcasted_by_peer1))
            )
            await asyncio.wait_for(got_txns.wait(), timeout=0.1)

            # Check that Peer2 received only the second tx which is valid
            assert outgoing_tx == [
                (node_two, (txs_broadcasted_by_peer1[1],)),
            ]
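`observe_outgoing_transactions` returns a recording list plus an `asyncio.Event`, as the unpacking above shows. A minimal sketch of the idea follows; `OutgoingTxs` is a hypothetical stand-in for whatever event the pool emits when it relays transactions, and the helper is written as if `subscribe` were synchronous.

import asyncio

def observe_outgoing_transactions(event_bus):
    outgoing_tx = []
    got_txns = asyncio.Event()

    def _record(event):
        # Hypothetical event shape: a session plus the relayed txs.
        outgoing_tx.append((event.session, tuple(event.txs)))
        got_txns.set()

    # A real helper would also clear the event between broadcasts so that
    # repeated `got_txns.wait()` calls observe each delivery.
    event_bus.subscribe(OutgoingTxs, _record)
    return outgoing_tx, got_txns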
Example #5
    async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
        trinity_config = boot_info.trinity_config
        db = DBClient.connect(trinity_config.database_ipc_path)

        app_config = trinity_config.get_app_config(Eth1AppConfig)
        chain_config = app_config.get_chain_config()

        chain = chain_config.full_chain_class(db)

        if boot_info.trinity_config.network_id == MAINNET_NETWORK_ID:
            validator = DefaultTransactionValidator(chain,
                                                    PETERSBURG_MAINNET_BLOCK)
        elif boot_info.trinity_config.network_id == ROPSTEN_NETWORK_ID:
            validator = DefaultTransactionValidator(chain,
                                                    PETERSBURG_ROPSTEN_BLOCK)
        else:
            raise Exception("This code path should not be reachable")

        proxy_peer_pool = ETHProxyPeerPool(event_bus,
                                           TO_NETWORKING_BROADCAST_CONFIG)

        tx_pool = TxPool(event_bus, proxy_peer_pool, validator)

        async with run_service(tx_pool):
            await tx_pool.cancellation()
Example #6
    async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
        trinity_config = boot_info.trinity_config
        db = DBClient.connect(trinity_config.database_ipc_path)
        with db:
            app_config = trinity_config.get_app_config(Eth1AppConfig)
            chain_config = app_config.get_chain_config()

            chain = chain_config.full_chain_class(db)

            if boot_info.trinity_config.network_id == MAINNET_NETWORK_ID:
                validator = DefaultTransactionValidator(
                    chain, ISTANBUL_MAINNET_BLOCK)
            elif boot_info.trinity_config.network_id == ROPSTEN_NETWORK_ID:
                validator = DefaultTransactionValidator(
                    chain, ISTANBUL_ROPSTEN_BLOCK)
            elif boot_info.trinity_config.network_id == GOERLI_NETWORK_ID:
                validator = DefaultTransactionValidator(
                    chain, ISTANBUL_GOERLI_BLOCK)
            else:
                raise Exception("This code path should not be reachable")

            proxy_peer_pool = ETHProxyPeerPool(event_bus,
                                               TO_NETWORKING_BROADCAST_CONFIG)
            async with background_asyncio_service(proxy_peer_pool):
                tx_pool = TxPool(event_bus, proxy_peer_pool, validator)
                async with background_asyncio_service(tx_pool) as manager:
                    await manager.wait_finished()
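Examples #6 and #9 drive every service through `background_asyncio_service`, which starts a service when the `async with` block is entered, yields its manager, and tears the service down on exit. The pattern in isolation, using the `async-service` library's API with a toy service standing in for `TxPool`:

import asyncio
from async_service import Service, background_asyncio_service

class Ticker(Service):
    # Toy stand-in for TxPool / ETHProxyPeerPool: loops until cancelled.
    async def run(self) -> None:
        while self.manager.is_running:
            await asyncio.sleep(1)

async def main() -> None:
    async with background_asyncio_service(Ticker()) as manager:
        # Block here until the service finishes or the scope is cancelled,
        # mirroring the `await manager.wait_finished()` calls above.
        await manager.wait_finished()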
Example #7
async def test_get_pooled_transactions_request(request, event_bus,
                                               other_event_bus, event_loop,
                                               chaindb_20, client_and_server):
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    if get_highest_eth_protocol_version(client_peer) < ETHProtocolV65.version:
        pytest.skip("Test not applicable below eth/65")

    client_peer_pool = MockPeerPoolWithConnectedPeers(
        [client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers(
        [server_peer], event_bus=server_event_bus)

    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(
            run_peer_pool_event_server(client_event_bus,
                                       client_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        await stack.enter_async_context(
            run_peer_pool_event_server(server_event_bus,
                                       server_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        client_proxy_peer_pool = ETHProxyPeerPool(
            client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(client_proxy_peer_pool))

        proxy_peer_pool = ETHProxyPeerPool(server_event_bus,
                                           TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(
            client_peer.session)

        # The reason we run this test separately from the other request tests is because
        # GetPooledTransactions requests should be answered from the tx pool which the previous
        # test does not depend on.
        await stack.enter_async_context(
            background_asyncio_service(
                TxPool(server_event_bus, proxy_peer_pool, lambda _: True)))

        # The tx pool always answers these with an empty response
        txs = await proxy_peer.eth_api.get_pooled_transactions((decode_hex(
            '0x9ea39df6210064648ecbc465cd628fe52f69af53792e1c2f27840133435159d4'
        ), ))
        assert len(txs) == 0
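The `lambda _: True` passed as the validator here suggests that `TxPool` only needs a callable from a transaction to a bool. A hypothetical custom validator in that shape (`min_gas_price_validator` is illustrative, not a Trinity API):

def min_gas_price_validator(min_gas_price):
    # Build a TxPool-compatible validator that drops underpriced txs.
    def validate(tx) -> bool:
        return tx.gas_price >= min_gas_price
    return validate

# Usage: TxPool(server_event_bus, proxy_peer_pool, min_gas_price_validator(10**9))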
Example #8
async def test_does_not_propagate_invalid_tx(event_bus,
                                             funded_address_private_key,
                                             chain_with_block_validation,
                                             tx_validator):
    chain = chain_with_block_validation

    initial_two_peers = TEST_NODES[:2]
    node_one = initial_two_peers[0]
    node_two = initial_two_peers[1]

    async with AsyncExitStack() as stack:
        await stack.enter_async_context(
            mock_request_response(
                GetConnectedPeersRequest,
                GetConnectedPeersResponse(initial_two_peers),
                event_bus,
            ))

        peer_pool = ETHProxyPeerPool(event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(run_service(peer_pool))

        tx_pool = TxPool(event_bus, peer_pool, tx_validator)
        await stack.enter_async_context(background_asyncio_service(tx_pool))

        await asyncio.sleep(0.01)

        txs_broadcasted_by_peer1 = [
            create_random_tx(chain, funded_address_private_key,
                             is_valid=False),
            create_random_tx(chain, funded_address_private_key)
        ]

        outgoing_tx, got_txns = observe_outgoing_transactions(event_bus)

        # Peer1 sends some txs
        await event_bus.broadcast(
            TransactionsEvent(session=node_one,
                              command=Transactions(txs_broadcasted_by_peer1)))
        await asyncio.wait_for(got_txns.wait(), timeout=0.1)

        # Check that Peer2 received only the second tx which is valid
        assert outgoing_tx == [
            (node_two, (txs_broadcasted_by_peer1[1], )),
        ]
Example #9
    async def do_run(self, event_bus: EndpointAPI) -> None:
        boot_info = self._boot_info
        trinity_config = boot_info.trinity_config
        db = DBClient.connect(trinity_config.database_ipc_path)
        with db:
            app_config = trinity_config.get_app_config(Eth1AppConfig)
            chain_config = app_config.get_chain_config()

            chain = chain_config.full_chain_class(db)

            validator = DefaultTransactionValidator.from_network_id(
                chain,
                boot_info.trinity_config.network_id,
            )

            proxy_peer_pool = ETHProxyPeerPool(event_bus, TO_NETWORKING_BROADCAST_CONFIG)
            async with background_asyncio_service(proxy_peer_pool):
                tx_pool = TxPool(event_bus, proxy_peer_pool, validator)
                async with background_asyncio_service(tx_pool) as manager:
                    await manager.wait_finished()
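Unlike the hand-rolled if/elif chains in Examples #5 and #6, this version hides the network-to-fork-block mapping behind `DefaultTransactionValidator.from_network_id`. A sketch of the lookup such a classmethod might perform, reusing the constants from Example #6; the dict-based table is an assumption about the implementation:

FORK_BLOCKS_BY_NETWORK = {
    MAINNET_NETWORK_ID: ISTANBUL_MAINNET_BLOCK,
    ROPSTEN_NETWORK_ID: ISTANBUL_ROPSTEN_BLOCK,
    GOERLI_NETWORK_ID: ISTANBUL_GOERLI_BLOCK,
}

def validator_from_network_id(chain, network_id):
    # Hypothetical free-function equivalent of the classmethod above.
    try:
        fork_block = FORK_BLOCKS_BY_NETWORK[network_id]
    except KeyError:
        raise Exception(f"Unsupported network id: {network_id}")
    return DefaultTransactionValidator(chain, fork_block)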
Example #10
async def test_tx_propagation(event_bus,
                              funded_address_private_key,
                              chain_with_block_validation,
                              tx_validator):

    initial_two_peers = TEST_NODES[:2]
    node_one = initial_two_peers[0]
    node_two = initial_two_peers[1]

    async with run_proxy_peer_pool(event_bus) as peer_pool:
        tx_pool = TxPool(event_bus, peer_pool, tx_validator)
        async with run_service(tx_pool):

            run_mock_request_response(
                GetConnectedPeersRequest, GetConnectedPeersResponse(initial_two_peers), event_bus)

            await asyncio.sleep(0.01)

            txs_broadcasted_by_peer1 = [
                create_random_tx(chain_with_block_validation, funded_address_private_key)
            ]

            # this needs to go here to ensure that the subscription is *after*
            # the one installed by the transaction pool so that the got_txns
            # event will get set after the other handlers have been called.
            outgoing_tx, got_txns = observe_outgoing_transactions(event_bus)

            # Peer1 sends some txs
            await event_bus.broadcast(
                TransactionsEvent(session=node_one, command=Transactions(txs_broadcasted_by_peer1))
            )

            await asyncio.wait_for(got_txns.wait(), timeout=0.1)

            assert outgoing_tx == [
                (node_two, tuple(txs_broadcasted_by_peer1)),
            ]
            # Clear the recording, we asserted all we want and would like to have a fresh start
            outgoing_tx.clear()

            # Peer1 sends same txs again
            await event_bus.broadcast(
                TransactionsEvent(session=node_one, command=Transactions(txs_broadcasted_by_peer1))
            )
            await asyncio.wait_for(got_txns.wait(), timeout=0.1)
            # Check that Peer2 doesn't receive them again
            assert len(outgoing_tx) == 0

            # Peer2 sends exact same txs back
            await event_bus.broadcast(
                TransactionsEvent(session=node_two, command=Transactions(txs_broadcasted_by_peer1))
            )
            await asyncio.wait_for(got_txns.wait(), timeout=0.1)

            # Check that Peer1 won't get them as that is where they originally came from
            assert len(outgoing_tx) == 0

            txs_broadcasted_by_peer2 = [
                create_random_tx(chain_with_block_validation, funded_address_private_key),
                txs_broadcasted_by_peer1[0]
            ]

            # Peer2 sends old + new tx
            await event_bus.broadcast(
                TransactionsEvent(session=node_two, command=Transactions(txs_broadcasted_by_peer2))
            )
            await asyncio.wait_for(got_txns.wait(), timeout=0.1)
            # Not sure why this sleep is needed....
            await asyncio.sleep(0.01)

            # Check that Peer1 receives only the one tx that it didn't know about
            assert outgoing_tx == [
                (node_one, (txs_broadcasted_by_peer2[0],)),
            ]
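The assertions above pin down the pool's duplicate suppression: a tx is never echoed back to the peer it came from, and no peer receives the same tx twice. One simple way to get that behavior is a seen-set keyed on (session, tx hash); the sketch below shows the idea, though the real pool may track this differently (e.g. with a bloom filter).

class SeenTracker:
    # Records which (session_id, tx_hash) pairs have crossed the wire,
    # in either direction, so they are never re-sent to that peer.
    def __init__(self):
        self._seen = set()

    def mark(self, session_id, tx_hash):
        self._seen.add((session_id, tx_hash))

    def should_send(self, session_id, tx_hash):
        return (session_id, tx_hash) not in self._seen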
Example #11
async def test_tx_propagation(event_bus, chain_with_block_validation,
                              tx_validator):

    initial_two_peers = TEST_NODES[:2]
    node_one = initial_two_peers[0]
    node_two = initial_two_peers[1]

    async with run_proxy_peer_pool(event_bus) as peer_pool:
        outgoing_tx = observe_outgoing_transactions(event_bus)
        tx_pool = TxPool(event_bus, peer_pool, tx_validator)
        async with run_service(tx_pool):

            run_mock_request_response(
                GetConnectedPeersRequest,
                GetConnectedPeersResponse(initial_two_peers), event_bus)

            await asyncio.sleep(0.01)

            txs_broadcasted_by_peer1 = [
                create_random_tx(chain_with_block_validation)
            ]

            # Peer1 sends some txs
            await event_bus.broadcast(
                TransactionsEvent(session=node_one,
                                  msg=txs_broadcasted_by_peer1,
                                  cmd=Transactions)
            )

            await asyncio.sleep(0.01)
            assert outgoing_tx == [
                (node_two, tuple(txs_broadcasted_by_peer1)),
            ]
            # Clear the recording, we asserted all we want and would like to have a fresh start
            outgoing_tx.clear()

            # Peer1 sends same txs again
            await event_bus.broadcast(
                TransactionsEvent(session=node_one,
                                  msg=txs_broadcasted_by_peer1,
                                  cmd=Transactions)
            )
            await asyncio.sleep(0.01)
            # Check that Peer2 doesn't receive them again
            assert len(outgoing_tx) == 0

            # Peer2 sends exact same txs back
            await event_bus.broadcast(
                TransactionsEvent(session=node_two,
                                  msg=txs_broadcasted_by_peer1,
                                  cmd=Transactions)
            )
            await asyncio.sleep(0.01)

            # Check that Peer1 won't get them as that is where they originally came from
            assert len(outgoing_tx) == 0

            txs_broadcasted_by_peer2 = [
                create_random_tx(chain_with_block_validation),
                txs_broadcasted_by_peer1[0]
            ]

            # Peer2 sends old + new tx
            await event_bus.broadcast(
                TransactionsEvent(session=node_two,
                                  msg=txs_broadcasted_by_peer2,
                                  cmd=Transactions)
            )
            await asyncio.sleep(0.01)

            # Check that Peer1 receives only the one tx that it didn't know about
            assert outgoing_tx == [
                (node_one, (txs_broadcasted_by_peer2[0], )),
            ]
Example #12
class TxComponent(AsyncioIsolatedComponent):
    tx_pool: TxPool = None

    @property
    def name(self) -> str:
        return "TxComponent"

    @classmethod
    def configure_parser(cls, arg_parser: ArgumentParser,
                         subparser: _SubParsersAction) -> None:
        arg_parser.add_argument(
            "--disable-tx-pool",
            action="store_true",
            help="Disables the Transaction Pool",
        )

    def on_ready(self, manager_eventbus: EndpointAPI) -> None:

        light_mode = self.boot_info.args.sync_mode == SYNC_LIGHT
        is_disable = self.boot_info.args.disable_tx_pool
        is_supported = not light_mode
        is_enabled = not is_disable and is_supported

        if is_disable:
            self.logger.debug("Transaction pool disabled")
        elif not is_supported:
            self.logger.warning(
                "Transaction pool disabled.  Not supported in light mode.")
        elif is_enabled:
            self.start()
        else:
            raise Exception("This code path should be unreachable")

    def do_start(self) -> None:

        trinity_config = self.boot_info.trinity_config
        db = DBClient.connect(trinity_config.database_ipc_path)

        app_config = trinity_config.get_app_config(Eth1AppConfig)
        chain_config = app_config.get_chain_config()

        chain = chain_config.full_chain_class(db)

        if self.boot_info.trinity_config.network_id == MAINNET_NETWORK_ID:
            validator = DefaultTransactionValidator(chain,
                                                    BYZANTIUM_MAINNET_BLOCK)
        elif self.boot_info.trinity_config.network_id == ROPSTEN_NETWORK_ID:
            validator = DefaultTransactionValidator(chain,
                                                    BYZANTIUM_ROPSTEN_BLOCK)
        else:
            raise ValueError(
                "The TxPool component only supports MainnetChain or RopstenChain"
            )

        proxy_peer_pool = ETHProxyPeerPool(self.event_bus,
                                           TO_NETWORKING_BROADCAST_CONFIG)

        self.tx_pool = TxPool(self.event_bus, proxy_peer_pool, validator)
        asyncio.ensure_future(
            exit_with_services(self.tx_pool, self._event_bus_service))
        asyncio.ensure_future(self.tx_pool.run())

    async def do_stop(self) -> None:
        # This isn't really needed for the standard shutdown case as the TxPool will automatically
        # shut down whenever the `CancelToken` it was chained with is triggered. It may still be
        # useful to stop the TxPool component individually though.
        if self.tx_pool.is_operational:
            await self.tx_pool.cancel()
            self.logger.info("Successfully stopped TxPool")