def start(self, context: PluginContext) -> None:
    """Create the transaction pool and schedule it on the event loop.

    The pool validates txs against Byzantium rules, so only chains whose
    Byzantium fork block is known here (mainnet, Ropsten) are supported.
    """
    if isinstance(self.chain, BaseMainnetChain):
        fork_block = BYZANTIUM_MAINNET_BLOCK
    elif isinstance(self.chain, BaseRopstenChain):
        fork_block = BYZANTIUM_ROPSTEN_BLOCK
    else:
        # TODO: We could hint the user about e.g. a --tx-pool-no-validation
        # flag to run the tx pool without tx validation in this case
        raise ValueError(
            "The TxPool plugin only supports MainnetChain or RopstenChain")

    validator = DefaultTransactionValidator(self.chain, fork_block)
    tx_pool = TxPool(self.peer_pool, validator, self.cancel_token)
    asyncio.ensure_future(tx_pool.run())
async def bootstrap_test_setup(monkeypatch, request, event_loop, chain, tx_validator):
    """Wire up two directly linked peers with a running TxPool.

    Returns the two peers, one ``send_transactions`` recorder per peer,
    and the pool itself.
    """
    peer1, peer2 = await get_directly_linked_peers(
        request,
        event_loop,
    )

    # Intercept sub_proto.send_transactions so tests can inspect in detail
    # which peer received what and how often it was invoked.
    recorder1 = create_tx_recorder(monkeypatch, peer1)
    recorder2 = create_tx_recorder(monkeypatch, peer2)

    pool = TxPool(MockPeerPoolWithConnectedPeers([peer1, peer2]), tx_validator)
    asyncio.ensure_future(pool.run())

    # Tear the pool down once the requesting test finishes.
    request.addfinalizer(
        lambda: event_loop.run_until_complete(pool.cancel()))

    return peer1, recorder1, peer2, recorder2, pool
async def test_tx_sending(request, event_loop, chain_with_block_validation, tx_validator):
    """End-to-end check that a tx sent by one peer reaches the other.

    This covers the wire-level communication, whereas the previous test
    focusses on the pool's rules about when to relay txs to whom.
    """
    peer1, peer2 = await get_directly_linked_peers(
        request,
        event_loop,
        peer1_class=ETHPeer,
        peer2_class=ETHPeer,
    )

    subscriber = SamplePeerSubscriber()
    peer2.add_subscriber(subscriber)

    pool = TxPool(MockPeerPoolWithConnectedPeers([peer1, peer2]), tx_validator)
    asyncio.ensure_future(pool.run())

    request.addfinalizer(
        lambda: event_loop.run_until_complete(pool.cancel()))

    txs = [create_random_tx(chain_with_block_validation)]
    peer1.sub_proto.send_transactions(txs)

    # Ensure that peer2 gets the transactions
    peer, cmd, msg = await asyncio.wait_for(
        subscriber.msg_queue.get(),
        timeout=0.1,
    )

    assert peer == peer2
    assert isinstance(cmd, Transactions)
    assert msg[0].hash == txs[0].hash
async def test_tx_propagation(event_bus, chain_with_block_validation, tx_validator):
    """Exercise the pool's relay rules over the event bus.

    Asserts that each tx is forwarded exactly once to peers that have not
    seen it, and never echoed back to the peer it came from.
    """
    initial_two_peers = TEST_NODES[:2]
    node_one = initial_two_peers[0]
    node_two = initial_two_peers[1]

    async with run_proxy_peer_pool(event_bus) as peer_pool:
        # Records every (receiver, txs) broadcast the pool sends out.
        outgoing_tx = observe_outgoing_transactions(event_bus)

        tx_pool = TxPool(event_bus, peer_pool, tx_validator)
        asyncio.ensure_future(tx_pool.run())

        # NOTE(review): if run_mock_request_response is a coroutine function,
        # this bare call creates a coroutine without scheduling it — confirm
        # whether it needs asyncio.ensure_future(...).
        run_mock_request_response(GetConnectedPeersRequest,
                                  GetConnectedPeersResponse(initial_two_peers),
                                  event_bus)

        # Give the pool a moment to discover the connected peers.
        await asyncio.sleep(0.01)

        txs_broadcasted_by_peer1 = [
            create_random_tx(chain_with_block_validation)
        ]

        # Peer1 sends some txs
        await event_bus.broadcast(
            TransactionsEvent(remote=node_one, msg=txs_broadcasted_by_peer1, cmd=Transactions))

        await asyncio.sleep(0.01)

        # The pool must have relayed them to the only other peer.
        assert outgoing_tx == [
            (node_two, txs_broadcasted_by_peer1),
        ]

        # Clear the recording, we asserted all we want and would like to have a fresh start
        outgoing_tx.clear()

        # Peer1 sends same txs again
        await event_bus.broadcast(
            TransactionsEvent(remote=node_one, msg=txs_broadcasted_by_peer1, cmd=Transactions))

        await asyncio.sleep(0.01)

        # Check that Peer2 doesn't receive them again
        assert len(outgoing_tx) == 0

        # Peer2 sends exact same txs back
        await event_bus.broadcast(
            TransactionsEvent(remote=node_two, msg=txs_broadcasted_by_peer1, cmd=Transactions))

        await asyncio.sleep(0.01)

        # Check that Peer1 won't get them as that is where they originally came from
        assert len(outgoing_tx) == 0

        txs_broadcasted_by_peer2 = [
            create_random_tx(chain_with_block_validation),
            txs_broadcasted_by_peer1[0]
        ]

        # Peer2 sends old + new tx
        await event_bus.broadcast(
            TransactionsEvent(remote=node_two, msg=txs_broadcasted_by_peer2, cmd=Transactions))

        await asyncio.sleep(0.01)

        # Check that Peer1 receives only the one tx that it didn't know about
        assert outgoing_tx == [
            (node_one, [txs_broadcasted_by_peer2[0]]),
        ]
class TxPlugin(AsyncioIsolatedPlugin):
    """Plugin that runs the transaction pool on top of the proxy peer pool.

    The pool performs Byzantium-rule tx validation and is therefore only
    available for the mainnet and Ropsten networks. It is also rejected in
    light sync mode.
    """

    # Set in do_start(); remains None while the plugin has never started.
    tx_pool: TxPool = None

    @property
    def name(self) -> str:
        return "TxPlugin"

    @classmethod
    def configure_parser(cls,
                         arg_parser: ArgumentParser,
                         subparser: _SubParsersAction) -> None:
        arg_parser.add_argument(
            "--tx-pool",
            action="store_true",
            help="Enables the Transaction Pool (experimental)",
        )

    def on_ready(self, manager_eventbus: TrinityEventBusEndpoint) -> None:
        """Start the pool if requested, or shut the node down if the request
        cannot be honored (light mode)."""
        light_mode = self.boot_info.args.sync_mode == SYNC_LIGHT
        is_enabled = self.boot_info.args.tx_pool and not light_mode
        unsupported = self.boot_info.args.tx_pool and light_mode

        # is_enabled and unsupported are mutually exclusive by construction,
        # so no additional "and not unsupported" check is needed.
        if is_enabled:
            self.start()
        elif unsupported:
            unsupported_msg = "Transaction pool not available in light mode"
            self.logger.error(unsupported_msg)
            manager_eventbus.request_shutdown(unsupported_msg)

    def do_start(self) -> None:
        """Build the chain, pick the fork-aware validator and launch the pool.

        Raises:
            ValueError: if the configured network is neither mainnet nor
                Ropsten, since the Byzantium fork block is unknown then.
        """
        trinity_config = self.boot_info.trinity_config
        db_manager = create_db_consumer_manager(trinity_config.database_ipc_path)
        db = db_manager.get_db()  # type: ignore

        app_config = trinity_config.get_app_config(Eth1AppConfig)
        chain_config = app_config.get_chain_config()
        chain = chain_config.full_chain_class(db)

        if trinity_config.network_id == MAINNET_NETWORK_ID:
            validator = DefaultTransactionValidator(chain, BYZANTIUM_MAINNET_BLOCK)
        elif trinity_config.network_id == ROPSTEN_NETWORK_ID:
            validator = DefaultTransactionValidator(chain, BYZANTIUM_ROPSTEN_BLOCK)
        else:
            # TODO: We could hint the user about e.g. a --tx-pool-no-validation
            # flag to run the tx pool without tx validation in this case
            raise ValueError(
                "The TxPool plugin only supports MainnetChain or RopstenChain")

        proxy_peer_pool = ETHProxyPeerPool(self.event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        self.tx_pool = TxPool(self.event_bus, proxy_peer_pool, validator)
        asyncio.ensure_future(self.tx_pool.run())

    async def do_stop(self) -> None:
        # This isn't really needed for the standard shutdown case as the TxPool
        # will automatically shutdown whenever the `CancelToken` it was chained
        # with is triggered. It may still be useful to stop the TxPool plugin
        # individually though.
        # Guard against do_stop() running before do_start() ever did (e.g.
        # startup failed): tx_pool is still None in that case and the original
        # attribute access would raise AttributeError.
        if self.tx_pool is not None and self.tx_pool.is_operational:
            await self.tx_pool.cancel()
            self.logger.info("Successfully stopped TxPool")
class TxPlugin(BaseAsyncStopPlugin):
    """Plugin that starts the transaction pool once its resources arrive.

    Waits for both the ``ETHPeerPool`` and the ``BaseChain`` to be announced
    on the event bus before launching the pool. Only mainnet and Ropsten are
    supported (Byzantium-rule validation); light mode is rejected.
    """

    # Resources collected from ResourceAvailableEvent; all default to None
    # until the corresponding event arrives.
    peer_pool: ETHPeerPool = None
    cancel_token: CancelToken = None
    chain: BaseChain = None
    is_enabled: bool = False
    # Set in do_start(); remains None while the plugin has never started.
    tx_pool: TxPool = None

    @property
    def name(self) -> str:
        return "TxPlugin"

    def configure_parser(self,
                         arg_parser: ArgumentParser,
                         subparser: _SubParsersAction) -> None:
        arg_parser.add_argument(
            "--tx-pool",
            action="store_true",
            help="Enables the Transaction Pool (experimental)",
        )

    def on_ready(self, manager_eventbus: TrinityEventBusEndpoint) -> None:
        """Record whether the pool should run and subscribe for resources."""
        light_mode = self.context.args.sync_mode == SYNC_LIGHT
        self.is_enabled = self.context.args.tx_pool and not light_mode

        unsupported = self.context.args.tx_pool and light_mode
        if unsupported:
            unsupported_msg = "Transaction pool not available in light mode"
            self.logger.error(unsupported_msg)
            manager_eventbus.request_shutdown(unsupported_msg)

        self.event_bus.subscribe(ResourceAvailableEvent, self.handle_event)

    def handle_event(self, event: ResourceAvailableEvent) -> None:
        """Collect the peer pool and chain; start once both are present."""
        if self.running:
            return

        if event.resource_type is ETHPeerPool:
            self.peer_pool, self.cancel_token = event.resource
        elif event.resource_type is BaseChain:
            self.chain = event.resource

        if all((self.peer_pool is not None, self.chain is not None, self.is_enabled)):
            self.start()

    def do_start(self) -> None:
        """Pick the fork-aware validator for the network and launch the pool.

        Raises:
            ValueError: if the configured network is neither mainnet nor
                Ropsten, since the Byzantium fork block is unknown then.
        """
        if self.context.trinity_config.network_id == MAINNET_NETWORK_ID:
            validator = DefaultTransactionValidator(self.chain, BYZANTIUM_MAINNET_BLOCK)
        elif self.context.trinity_config.network_id == ROPSTEN_NETWORK_ID:
            validator = DefaultTransactionValidator(self.chain, BYZANTIUM_ROPSTEN_BLOCK)
        else:
            # TODO: We could hint the user about e.g. a --tx-pool-no-validation
            # flag to run the tx pool without tx validation in this case
            raise ValueError(
                "The TxPool plugin only supports MainnetChain or RopstenChain")

        self.tx_pool = TxPool(self.peer_pool, validator, self.cancel_token)
        asyncio.ensure_future(self.tx_pool.run())

    async def do_stop(self) -> None:
        # This isn't really needed for the standard shutdown case as the TxPool
        # will automatically shutdown whenever the `CancelToken` it was chained
        # with is triggered. It may still be useful to stop the TxPool plugin
        # individually though.
        # Guard against do_stop() running before do_start() ever did (the
        # resources may never have arrived): tx_pool is still None then and
        # the original attribute access would raise AttributeError.
        if self.tx_pool is not None and self.tx_pool.is_operational:
            await self.tx_pool.cancel()
            self.logger.info("Successfully stopped TxPool")