def test_prometheus_event_handling_raise_exception(pathfinding_service_mock_empty):
    """An exception inside an event handler must be counted in the metrics."""
    service = pathfinding_service_mock_empty
    metrics_state = save_metrics_state(metrics.REGISTRY)

    network_event = ReceiveTokenNetworkCreatedEvent(
        token_address=TokenAddress(bytes([1] * 20)),
        token_network_address=TokenNetworkAddress(bytes([2] * 20)),
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        block_number=BlockNumber(1),
    )

    # Replace the handler with one that always fails.
    service.handle_token_network_created = Mock(side_effect=KeyError())

    with pytest.raises(KeyError):
        service.handle_event(network_event)

    # The exceptions raised in the wrapped part of the prometheus logging
    # will not be handled anywhere at the moment.
    # Force an exception and test correct logging of it anyways,
    # since at some point higher in the call stack we could catch exceptions.
    exception_delta = metrics_state.get_delta(
        "events_exceptions_total",
        labels={"event_type": "ReceiveTokenNetworkCreatedEvent"},
    )
    assert exception_delta == 1.0
def test_save_and_load_token_networks(pathfinding_service_mock_empty):
    """Token network state survives a save/load round trip through the DB."""
    service = pathfinding_service_mock_empty

    network_address = TokenNetworkAddress(bytes([2] * 20))
    participant_a = Address(bytes([3] * 20))
    participant_b = Address(bytes([4] * 20))

    # Create a token network, then open one channel in it.
    service.handle_event(
        ReceiveTokenNetworkCreatedEvent(
            token_address=TokenAddress(bytes([1] * 20)),
            token_network_address=network_address,
            settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
            block_number=BlockNumber(1),
        )
    )
    service.handle_event(
        ReceiveChannelOpenedEvent(
            token_network_address=network_address,
            channel_identifier=ChannelID(1),
            participant1=participant_a,
            participant2=participant_b,
            block_number=BlockNumber(2),
        )
    )
    assert len(service.token_networks) == 1

    reloaded = service._load_token_networks()  # pylint: disable=protected-access
    assert len(reloaded) == 1

    # The reloaded network must match the in-memory one.
    original_network = next(iter(service.token_networks.values()))
    loaded_network = next(iter(reloaded.values()))
    assert loaded_network.address == original_network.address
    assert loaded_network.channel_id_to_addresses == original_network.channel_id_to_addresses
    assert loaded_network.G.nodes == original_network.G.nodes
def test_logging_processor():
    """The logging processor converts bytes addresses to checksum addresses,
    even when the bytes are nested inside events or messages."""
    logger = Mock()
    log_method = Mock()

    token_addr = TokenAddress(b"\x7f[\xf6\xc9To\xa8\x185w\xe4\x9f\x15\xbc\xef@mr\xd5\xd9")

    # Plain address in the event dict.
    address_log = format_to_hex(
        _logger=logger, _log_method=log_method, event_dict=dict(address=token_addr)
    )
    assert address_log["address"] == to_checksum_address(token_addr)

    # Addresses nested inside an event dataclass.
    network_addr = Address(b"\x7f[\xf6\xc9To\xa8\x185w\xe4\x9f\x15\xbc\xef@mr\xd5\xd1")
    created_event = ReceiveTokenNetworkCreatedEvent(
        token_address=token_addr,
        token_network_address=TokenNetworkAddress(network_addr),
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        block_number=BlockNumber(1),
    )
    event_log = format_to_hex(
        _logger=logger, _log_method=log_method, event_dict=dict(event=created_event)
    )
    # pylint: disable=unsubscriptable-object
    assert event_log["event"]["token_address"] == to_checksum_address(token_addr)
    assert event_log["event"]["token_network_address"] == to_checksum_address(network_addr)
    assert event_log["event"]["type_name"] == "ReceiveTokenNetworkCreatedEvent"

    # Addresses nested inside a message.
    fee_update = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=TokenNetworkAddress(token_addr),
            channel_identifier=ChannelID(1),
        ),
        updating_participant=PARTICIPANT1,
        fee_schedule=FeeScheduleState(),
        timestamp=datetime.utcnow(),
        signature=EMPTY_SIGNATURE,
    )
    message_log = format_to_hex(
        _logger=logger, _log_method=log_method, event_dict=dict(message=fee_update)
    )
    assert message_log["message"]["canonical_identifier"][
        "token_network_address"
    ] == to_checksum_address(token_addr)
    assert message_log["message"]["type_name"] == "PFSFeeUpdate"
def test_token_network_created_handlers_add_network(context: Context):
    """The handler registers a new token network exactly once (idempotent)."""
    created_event = ReceiveTokenNetworkCreatedEvent(
        token_address=DEFAULT_TOKEN_ADDRESS,
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        block_number=BlockNumber(12),
    )
    assert len(context.database.get_token_network_addresses()) == 0

    # First delivery registers the network.
    token_network_created_handler(created_event, context)
    assert len(context.database.get_token_network_addresses()) == 1

    # A repeated delivery of the same event must not add it twice.
    token_network_created_handler(created_event, context)
    assert len(context.database.get_token_network_addresses()) == 1
def test_prometheus_event_handling_no_exceptions(pathfinding_service_mock_empty):
    """Successful event handling records processing time and zero exceptions."""
    metrics_state = save_metrics_state(metrics.REGISTRY)
    service = pathfinding_service_mock_empty

    network_address = TokenNetworkAddress(bytes([2] * 20))
    handled_events = (
        ReceiveTokenNetworkCreatedEvent(
            token_address=TokenAddress(bytes([1] * 20)),
            token_network_address=network_address,
            settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
            block_number=BlockNumber(1),
        ),
        ReceiveChannelOpenedEvent(
            token_network_address=network_address,
            channel_identifier=ChannelID(1),
            participant1=Address(bytes([3] * 20)),
            participant2=Address(bytes([4] * 20)),
            block_number=BlockNumber(2),
        ),
    )

    for event in handled_events:
        service.handle_event(event)
        event_labels = {"event_type": event.__class__.__name__}

        # check that we have non-zero processing time for the events we created
        processing_delta = metrics_state.get_delta(
            "events_processing_duration_seconds_sum", labels=event_labels
        )
        assert processing_delta > 0.0

        # there should be no exception raised
        exception_delta = metrics_state.get_delta(
            "events_exceptions_total", labels=event_labels
        )
        assert exception_delta == 0.0
def test_token_network_created(pathfinding_service_mock):
    """Handling a TokenNetworkCreated event adds the network exactly once."""
    network_address = TokenNetworkAddress(bytes(bytes([2] * 20)))
    created_event = ReceiveTokenNetworkCreatedEvent(
        token_address=Address(bytes([1] * 20)),
        token_network_address=network_address,
        block_number=BlockNumber(1),
    )

    # The mock starts with one unrelated token network.
    assert not pathfinding_service_mock.follows_token_network(network_address)
    assert len(pathfinding_service_mock.token_networks) == 1

    pathfinding_service_mock.handle_event(created_event)
    assert pathfinding_service_mock.follows_token_network(network_address)
    assert len(pathfinding_service_mock.token_networks) == 2

    # Test idempotency: the same event must not add a second network.
    pathfinding_service_mock.handle_event(created_event)
    assert pathfinding_service_mock.follows_token_network(network_address)
    assert len(pathfinding_service_mock.token_networks) == 2
def test_token_network_created(pathfinding_service_mock):
    """Handling a TokenNetworkCreated event adds the network exactly once."""
    network_address = TokenNetworkAddress(bytes(bytes([2] * 20)))
    created_event = ReceiveTokenNetworkCreatedEvent(
        token_address=TokenAddress(bytes([1] * 20)),
        token_network_address=network_address,
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        block_number=BlockNumber(1),
    )

    # The mock starts with one unrelated token network.
    assert not pathfinding_service_mock.follows_token_network(network_address)
    assert len(pathfinding_service_mock.token_networks) == 1

    pathfinding_service_mock.handle_event(created_event)
    assert pathfinding_service_mock.follows_token_network(network_address)
    assert len(pathfinding_service_mock.token_networks) == 2

    # Test idempotency: the same event must not add a second network.
    pathfinding_service_mock.handle_event(created_event)
    assert pathfinding_service_mock.follows_token_network(network_address)
    assert len(pathfinding_service_mock.token_networks) == 2
def test_save_and_load_token_networks(pathfinding_service_mock_empty):
    """Token network state survives a save/load round trip through the DB."""
    service = pathfinding_service_mock_empty

    network_address = TokenNetworkAddress("0x" + "2" * 40)

    # Create a token network, then open one channel in it.
    service.handle_event(
        ReceiveTokenNetworkCreatedEvent(
            token_address=Address("0x" + "1" * 40),
            token_network_address=network_address,
            block_number=BlockNumber(1),
        )
    )
    service.handle_event(
        ReceiveChannelOpenedEvent(
            token_network_address=network_address,
            channel_identifier=ChannelID(1),
            participant1=Address("0x" + "3" * 40),
            participant2=Address("0x" + "4" * 40),
            settle_timeout=2 ** 65,  # larger than max_uint64 to check hex storage
            block_number=BlockNumber(2),
        )
    )
    assert len(service.token_networks) == 1

    reloaded = service._load_token_networks()  # pylint: disable=protected-access
    assert len(reloaded) == 1

    # The reloaded network must match the in-memory one.
    original_network = next(iter(service.token_networks.values()))
    loaded_network = next(iter(reloaded.values()))
    assert loaded_network.address == original_network.address
    assert loaded_network.channel_id_to_addresses == original_network.channel_id_to_addresses
    assert loaded_network.G.nodes == original_network.G.nodes
def test_save_and_load_token_networks(pathfinding_service_mock):
    """Token network state survives a save/load round trip through the DB."""
    service = pathfinding_service_mock
    service.token_networks = {}  # the mock does not fit this case exactly

    network_address = TokenNetworkAddress("0x" + "2" * 40)

    # Create a token network, then open one channel in it.
    service.handle_event(
        ReceiveTokenNetworkCreatedEvent(
            token_address=Address("0x" + "1" * 40),
            token_network_address=network_address,
            block_number=BlockNumber(1),
        )
    )
    service.handle_event(
        ReceiveChannelOpenedEvent(
            token_network_address=network_address,
            channel_identifier=ChannelID(1),
            participant1=Address("0x" + "3" * 40),
            participant2=Address("0x" + "4" * 40),
            settle_timeout=1000,
            block_number=BlockNumber(2),
        )
    )
    assert len(service.token_networks) == 1

    reloaded = service._load_token_networks()
    assert len(reloaded) == 1

    # The reloaded network must match the in-memory one.
    original_network = next(iter(service.token_networks.values()))
    loaded_network = next(iter(reloaded.values()))
    assert loaded_network.address == original_network.address
    assert loaded_network.channel_id_to_addresses == original_network.channel_id_to_addresses
    assert loaded_network.G.nodes == original_network.G.nodes
def get_blockchain_events(
    web3: Web3,
    contract_manager: ContractManager,
    chain_state: BlockchainState,
    to_block: BlockNumber,
    query_ms: bool = True,
) -> Tuple[BlockchainState, List[Event]]:
    """Query the chain for all service-relevant events up to ``to_block``.

    Returns a (new chain state, events) tuple; the passed-in ``chain_state``
    is deep-copied and never mutated. ``query_ms`` controls whether the
    monitoring service contract is queried as well.
    """
    # increment by one, as latest_known_block has been queried last time already
    from_block = BlockNumber(chain_state.latest_known_block + 1)

    # Check if the current block was already processed
    if from_block > to_block:
        return chain_state, []

    new_chain_state = deepcopy(chain_state)
    log.info('Querying new block(s)', from_block=from_block, end_block=to_block)

    # first check for new token networks and add to state
    registry_events = query_blockchain_events(
        web3=web3,
        contract_manager=contract_manager,
        contract_address=new_chain_state.token_network_registry_address,
        contract_name=CONTRACT_TOKEN_NETWORK_REGISTRY,
        topics=create_registry_event_topics(contract_manager),
        from_block=from_block,
        to_block=to_block,
    )

    events: List[Event] = []
    for event in registry_events:
        token_network_address = event['args']['token_network_address']
        token_address = event['args']['token_address']
        block_number = event['blockNumber']

        events.append(
            ReceiveTokenNetworkCreatedEvent(
                token_address=token_address,
                token_network_address=token_network_address,
                block_number=block_number,
            ))
        # Remember the new network so it is queried below and on future runs.
        new_chain_state.token_network_addresses.append(
            event['args']['token_network_address'])

    # then check all token networks
    for token_network_address in new_chain_state.token_network_addresses:
        # topics=[None] matches every event of the token network contract.
        network_events = query_blockchain_events(
            web3=web3,
            contract_manager=contract_manager,
            contract_address=Address(token_network_address),
            contract_name=CONTRACT_TOKEN_NETWORK,
            topics=[None],
            from_block=from_block,
            to_block=to_block,
        )

        for event in network_events:
            event_name = event['event']
            # Fields shared by every channel event type below.
            common_infos = dict(
                token_network_address=event['address'],
                channel_identifier=event['args']['channel_identifier'],
                block_number=event['blockNumber'],
            )

            # Map raw contract events onto the service's internal event types.
            if event_name == ChannelEvent.OPENED:
                events.append(
                    ReceiveChannelOpenedEvent(
                        participant1=event['args']['participant1'],
                        participant2=event['args']['participant2'],
                        settle_timeout=event['args']['settle_timeout'],
                        **common_infos,
                    ))
            elif event_name == ChannelEvent.DEPOSIT:
                events.append(
                    ReceiveChannelNewDepositEvent(
                        participant_address=event['args']['participant'],
                        total_deposit=event['args']['total_deposit'],
                        **common_infos,
                    ))
            elif event_name == ChannelEvent.CLOSED:
                events.append(
                    ReceiveChannelClosedEvent(
                        closing_participant=event['args']['closing_participant'],
                        **common_infos,
                    ))
            elif event_name == ChannelEvent.BALANCE_PROOF_UPDATED:
                events.append(
                    ReceiveNonClosingBalanceProofUpdatedEvent(
                        closing_participant=event['args']['closing_participant'],
                        nonce=event['args']['nonce'],
                        **common_infos,
                    ))
            elif event_name == ChannelEvent.SETTLED:
                events.append(ReceiveChannelSettledEvent(**common_infos))

    # get events from monitoring service contract
    if query_ms:
        monitoring_events = get_monitoring_blockchain_events(
            web3=web3,
            contract_manager=contract_manager,
            chain_state=new_chain_state,
            from_block=from_block,
            to_block=to_block,
        )
        events.extend(monitoring_events)

    # commit new block number
    events.append(UpdatedHeadBlockEvent(head_block_number=to_block))

    return new_chain_state, events
def get_blockchain_events(
    web3: Web3,
    token_network_addresses: List[TokenNetworkAddress],
    chain_state: BlockchainState,
    from_block: BlockNumber,
    to_block: BlockNumber,
) -> List[Event]:
    """Query the chain for all service-relevant events in [from_block, to_block].

    Note: newly discovered token networks are appended to the caller's
    ``token_network_addresses`` list in place.
    """
    # Check if the current block was already processed
    if from_block > to_block:
        return []

    log.info(
        "Querying new block(s)",
        from_block=from_block,
        to_block=to_block,
        # When `to_block` == `from_block` we query one block, so add one
        num_blocks=to_block - from_block + 1,
    )

    # first check for new token networks and add to state
    registry_events = query_blockchain_events(
        web3=web3,
        contract_addresses=[chain_state.token_network_registry_address],
        from_block=from_block,
        to_block=to_block,
    )

    events: List[Event] = []
    for event_dict in registry_events:
        token_network_address = TokenNetworkAddress(
            to_canonical_address(event_dict["args"]["token_network_address"])
        )
        events.append(
            ReceiveTokenNetworkCreatedEvent(
                token_network_address=token_network_address,
                token_address=TokenAddress(
                    to_canonical_address(event_dict["args"]["token_address"])
                ),
                block_number=event_dict["blockNumber"],
            )
        )
        # Make the new network visible to the query below and to the caller.
        token_network_addresses.append(token_network_address)

    # then check all token networks
    network_events = query_blockchain_events(
        web3=web3,
        contract_addresses=token_network_addresses,  # type: ignore
        from_block=from_block,
        to_block=to_block,
    )
    for event_dict in network_events:
        event = parse_token_network_event(event_dict)
        if event:
            events.append(event)

    # get events from monitoring service contract, this only queries the chain
    # if the monitor contract address is set in chain_state
    monitoring_events = get_monitoring_blockchain_events(
        web3=web3,
        monitor_contract_address=chain_state.monitor_contract_address,
        from_block=from_block,
        to_block=to_block,
    )
    events.extend(monitoring_events)

    # commit new block number
    events.append(UpdatedHeadBlockEvent(head_block_number=to_block))

    return events
def test_crash(tmpdir, mockchain):  # pylint: disable=too-many-locals
    """Process blocks and compare results with/without crash

    A somewhat meaningful crash handling is simulated by not including the
    UpdatedHeadBlockEvent in every block.
    """
    network_address = TokenNetworkAddress("0x" + "2" * 40)
    block_events = [
        [
            ReceiveTokenNetworkCreatedEvent(
                token_address=Address("0x" + "1" * 40),
                token_network_address=network_address,
                block_number=BlockNumber(1),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(2))],
        [
            ReceiveChannelOpenedEvent(
                token_network_address=network_address,
                channel_identifier=ChannelID(1),
                participant1=Address("0x" + "3" * 40),
                participant2=Address("0x" + "4" * 40),
                settle_timeout=1000,
                block_number=BlockNumber(3),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(4))],
    ]
    mockchain(block_events)

    server_private_key = get_random_privkey()
    contracts = {
        CONTRACT_TOKEN_NETWORK_REGISTRY: ContractMock(),
        CONTRACT_USER_DEPOSIT: ContractMock(),
    }

    def new_service(filename):
        # Each service gets its own database file inside the tmp directory.
        return PathfindingService(
            web3=Web3Mock(),
            private_key=server_private_key,
            contracts=contracts,
            db_filename=os.path.join(tmpdir, filename),
        )

    # initialize stable service
    stable_service = new_service("stable.db")

    # process each block and compare results between crashy and stable service
    for to_block in range(len(block_events)):
        # A fresh instance per iteration simulates a crash + restart.
        crashy_service = new_service("crashy.db")

        result_state: List[dict] = []
        for service in [stable_service, crashy_service]:
            service._process_new_blocks(BlockNumber(to_block))  # pylint: disable=protected-access
            result_state.append(dict(db_dump=list(service.database.conn.iterdump())))

        # both instances should have the same state after processing;
        # do asserts for each key separately to get better error messages
        for stable_state, crashy_state in zip(
            result_state[0].values(), result_state[1].values()
        ):
            assert stable_state == crashy_state
def test_crash(tmpdir, mockchain):  # pylint: disable=too-many-locals
    """Process blocks and compare results with/without crash

    A somewhat meaningful crash handling is simulated by not including the
    UpdatedHeadBlockEvent in every block.
    """
    network_address = TokenNetworkAddress(bytes([2] * 20))
    block_events = [
        [
            ReceiveTokenNetworkCreatedEvent(
                token_address=TokenAddress(bytes([1] * 20)),
                token_network_address=network_address,
                settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
                block_number=BlockNumber(1),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(2))],
        [
            ReceiveChannelOpenedEvent(
                token_network_address=network_address,
                channel_identifier=ChannelID(1),
                participant1=Address(bytes([3] * 20)),
                participant2=Address(bytes([4] * 20)),
                block_number=BlockNumber(3),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(4))],
    ]
    mockchain(block_events)

    server_private_key = PrivateKey(get_random_privkey())
    contracts = {
        CONTRACT_TOKEN_NETWORK_REGISTRY: ContractMock(),
        CONTRACT_USER_DEPOSIT: ContractMock(),
    }

    def new_service(filename):
        # Each service gets its own database file inside the tmp directory.
        return PathfindingService(
            web3=Web3Mock(),
            private_key=server_private_key,
            contracts=contracts,
            sync_start_block=BlockNumber(0),
            required_confirmations=BlockTimeout(0),
            poll_interval=0,
            db_filename=os.path.join(tmpdir, filename),
        )

    # initialize stable service
    stable_service = new_service("stable.db")

    # process each block and compare results between crashy and stable service
    for to_block in range(len(block_events)):
        # A fresh instance per iteration simulates a crash + restart.
        crashy_service = new_service("crashy.db")

        result_state: List[dict] = []
        for service in [stable_service, crashy_service]:
            service._process_new_blocks(BlockNumber(to_block))  # pylint: disable=protected-access
            result_state.append(dict(db_dump=list(service.database.conn.iterdump())))

        # both instances should have the same state after processing
        for stable_state, crashy_state in zip(
            result_state[0].values(), result_state[1].values()
        ):
            if isinstance(stable_state, BlockchainState):
                assert stable_state.chain_id == crashy_state.chain_id
                assert (
                    stable_state.token_network_registry_address
                    == crashy_state.token_network_registry_address
                )
                assert stable_state.latest_committed_block == crashy_state.latest_committed_block
                assert (
                    stable_state.monitor_contract_address
                    == crashy_state.monitor_contract_address
                )
                # Do not compare `current_event_filter_interval`, this is
                # allowed to be different
            else:
                assert stable_state == crashy_state

        # close the db connection so we can access it again
        crashy_service.database.conn.close()
def get_blockchain_events(
    web3: Web3,
    contract_manager: ContractManager,
    chain_state: BlockchainState,
    to_block: BlockNumber,
) -> Tuple[BlockchainState, List[Event]]:
    """Query the chain for all service-relevant events up to ``to_block``.

    Returns a (new chain state, events) tuple; the passed-in ``chain_state``
    is deep-copied and never mutated.
    """
    # increment by one, as latest_known_block has been queried last time already
    from_block = BlockNumber(chain_state.latest_known_block + 1)

    # Check if the current block was already processed
    if from_block > to_block:
        return chain_state, []

    new_chain_state = deepcopy(chain_state)
    log.info("Querying new block(s)", from_block=from_block, end_block=to_block)

    # first check for new token networks and add to state
    registry_events = query_blockchain_events(
        web3=web3,
        contract_manager=contract_manager,
        contract_address=new_chain_state.token_network_registry_address,
        contract_name=CONTRACT_TOKEN_NETWORK_REGISTRY,
        topics=create_registry_event_topics(contract_manager),
        from_block=from_block,
        to_block=to_block,
    )

    events: List[Event] = []
    for event_dict in registry_events:
        events.append(
            ReceiveTokenNetworkCreatedEvent(
                token_network_address=decode_hex(
                    event_dict["args"]["token_network_address"]),
                token_address=decode_hex(event_dict["args"]["token_address"]),
                block_number=event_dict["blockNumber"],
            ))
        # Remember the new network so it is queried below and on future runs.
        new_chain_state.token_network_addresses.append(
            event_dict["args"]["token_network_address"])

    # then check all token networks
    for token_network_address in new_chain_state.token_network_addresses:
        # topics=[None] matches every event of the token network contract.
        network_events = query_blockchain_events(
            web3=web3,
            contract_manager=contract_manager,
            contract_address=Address(token_network_address),
            contract_name=CONTRACT_TOKEN_NETWORK,
            topics=[None],
            from_block=from_block,
            to_block=to_block,
        )

        for event_dict in network_events:
            event = parse_token_network_event(event_dict)
            if event:
                events.append(event)

    # get events from monitoring service contract, this only queries the chain
    # if the monitor contract address is set in chain_state
    monitoring_events = get_monitoring_blockchain_events(
        web3=web3,
        contract_manager=contract_manager,
        chain_state=new_chain_state,
        from_block=from_block,
        to_block=to_block,
    )
    events.extend(monitoring_events)

    # commit new block number
    events.append(UpdatedHeadBlockEvent(head_block_number=to_block))

    return new_chain_state, events