def test_path_without_capacity(token_network_model: TokenNetwork, addresses: List[Address]):
    """Channels without capacity must not cause unexpected exceptions.

    Regression test for https://github.com/raiden-network/raiden-services/issues/636
    """
    token_network_model.handle_channel_opened_event(
        channel_identifier=ChannelID(1),
        participant1=addresses[0],
        participant2=addresses[1],
    )
    token_network_model.handle_channel_opened_event(
        channel_identifier=ChannelID(2),
        participant1=addresses[1],
        participant2=addresses[2],
    )
    # Only the second hop gets capacity; the first channel stays at zero.
    token_network_model.G[addresses[1]][addresses[2]]["view"].channel.capacity1 = 100

    candidate = Path(
        G=token_network_model.G,
        nodes=[addresses[0], addresses[1], addresses[2]],
        value=PaymentAmount(10),
        reachability_state=SimpleReachabilityContainer({}),
    )
    # The path must simply be reported invalid, not raise.
    assert not candidate.is_valid
def test_token_channel_coop_settled(pathfinding_service_mock, token_network_model):
    """Settle events remove the channel; unknown networks/channels are ignored."""
    setup_channel(pathfinding_service_mock, token_network_model)
    token_network_address = make_token_network_address()

    # Test invalid token network address
    settle_event = ReceiveChannelSettledEvent(
        token_network_address=token_network_address,
        channel_identifier=ChannelID(1),
        block_number=BlockNumber(2),
    )
    pathfinding_service_mock.handle_event(settle_event)
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 1

    # Test proper token network address
    settle_event = dataclasses.replace(
        settle_event, token_network_address=token_network_model.address
    )
    pathfinding_service_mock.handle_event(settle_event)
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 0

    # Test non-existent channel
    settle_event = dataclasses.replace(settle_event, channel_identifier=ChannelID(123))
    pathfinding_service_mock.handle_event(settle_event)
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 0
def test_check_path_request_errors(token_network_model, addresses):
    """Walk through every error message `check_path_request_errors` can return."""
    a = addresses  # pylint: disable=invalid-name

    # Not online checks
    assert (
        token_network_model.check_path_request_errors(
            a[0], a[2], 100, SimpleReachabilityContainer({})
        )
        == "Source not online"
    )
    assert (
        token_network_model.check_path_request_errors(
            a[0],
            a[2],
            100,
            SimpleReachabilityContainer({a[0]: AddressReachability.REACHABLE}),
        )
        == "Target not online"
    )

    # No channel checks
    reachability = SimpleReachabilityContainer(
        {a[0]: AddressReachability.REACHABLE, a[2]: AddressReachability.REACHABLE}
    )
    assert (
        token_network_model.check_path_request_errors(a[0], a[2], 100, reachability)
        == "No channel from source"
    )
    token_network_model.handle_channel_opened_event(
        channel_identifier=ChannelID(1),
        participant1=a[0],
        participant2=a[1],
    )
    assert (
        token_network_model.check_path_request_errors(a[0], a[2], 100, reachability)
        == "No channel to target"
    )
    # BUG FIX: the original reused ChannelID(1) here for a distinct channel,
    # which silently overwrote the a[0]<->a[1] entry in channel_id_to_addresses.
    # Each opened channel now gets a unique identifier (1, 2, 3).
    token_network_model.handle_channel_opened_event(
        channel_identifier=ChannelID(2),
        participant1=a[1],
        participant2=a[2],
    )

    # Check capacities
    assert token_network_model.check_path_request_errors(
        a[0], a[2], 100, reachability
    ).startswith("Source does not have a channel with sufficient capacity")
    token_network_model.G.edges[a[0], a[1]]["view"].capacity = 100
    assert token_network_model.check_path_request_errors(
        a[0], a[2], 100, reachability
    ).startswith("Target does not have a channel with sufficient capacity")
    token_network_model.G.edges[a[1], a[2]]["view"].capacity = 100

    # Must return `None` when no errors could be found
    assert token_network_model.check_path_request_errors(a[0], a[2], 100, reachability) is None

    # Check error when there is no route in the graph
    token_network_model.handle_channel_opened_event(
        channel_identifier=ChannelID(3),
        participant1=a[3],
        participant2=a[4],
    )
    token_network_model.G.edges[a[3], a[4]]["view"].capacity = 100
    reachability.reachabilities[a[4]] = AddressReachability.REACHABLE
    assert (
        token_network_model.check_path_request_errors(a[0], a[4], 100, reachability)
        == "No route from source to target"
    )
def test_scheduled_events(ms_database: Database):
    """Scheduled events can be upserted (idempotently), queried by time and removed."""
    # Add token network used as foreign key
    token_network_address = TokenNetworkAddress(bytes([1] * 20))
    ms_database.conn.execute(
        "INSERT INTO token_network (address, settle_timeout) VALUES (?, ?)",
        [to_checksum_address(token_network_address), DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT],
    )

    def monitoring_event_at(trigger_timestamp):
        # Both events target the same channel; only the trigger time differs.
        return ScheduledEvent(
            trigger_timestamp=trigger_timestamp,
            event=ActionMonitoringTriggeredEvent(
                token_network_address=token_network_address,
                channel_identifier=ChannelID(1),
                non_closing_participant=Address(bytes([1] * 20)),
            ),
        )

    event1 = monitoring_event_at(23 * 15)
    assert ms_database.scheduled_event_count() == 0
    ms_database.upsert_scheduled_event(event=event1)
    assert ms_database.scheduled_event_count() == 1

    event2 = monitoring_event_at(24 * 15)
    ms_database.upsert_scheduled_event(event2)
    assert ms_database.scheduled_event_count() == 2

    assert len(ms_database.get_scheduled_events(22 * 15)) == 0
    assert len(ms_database.get_scheduled_events(23 * 15)) == 1
    assert len(ms_database.get_scheduled_events(24 * 15)) == 2

    # Upserting an already stored event must not create a duplicate.
    ms_database.upsert_scheduled_event(event1)
    assert ms_database.scheduled_event_count() == 2
    assert len(ms_database.get_scheduled_events(22 * 15)) == 0
    assert len(ms_database.get_scheduled_events(23 * 15)) == 1
    assert len(ms_database.get_scheduled_events(24 * 15)) == 2

    ms_database.remove_scheduled_event(event2)
    assert len(ms_database.get_scheduled_events(22 * 15)) == 0
    assert len(ms_database.get_scheduled_events(23 * 15)) == 1
    assert len(ms_database.get_scheduled_events(24 * 15)) == 1
def test_save_and_load_token_networks(pathfinding_service_mock_empty):
    """Token network state written by event handling can be restored from the DB."""
    pfs = pathfinding_service_mock_empty

    token_address = TokenAddress(bytes([1] * 20))
    token_network_address = TokenNetworkAddress(bytes([2] * 20))
    channel_id = ChannelID(1)
    p1 = Address(bytes([3] * 20))
    p2 = Address(bytes([4] * 20))
    events = [
        ReceiveTokenNetworkCreatedEvent(
            token_address=token_address,
            token_network_address=token_network_address,
            settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
            block_number=BlockNumber(1),
        ),
        ReceiveChannelOpenedEvent(
            token_network_address=token_network_address,
            channel_identifier=channel_id,
            participant1=p1,
            participant2=p2,
            block_number=BlockNumber(2),
        ),
    ]
    for event in events:
        pfs.handle_event(event)
    assert len(pfs.token_networks) == 1

    loaded_networks = pfs._load_token_networks()  # pylint: disable=protected-access
    assert len(loaded_networks) == 1

    # The restored network must match the in-memory one.
    in_memory = list(pfs.token_networks.values())[0]
    restored = list(loaded_networks.values())[0]
    assert restored.address == in_memory.address
    assert restored.channel_id_to_addresses == in_memory.channel_id_to_addresses
    assert restored.G.nodes == in_memory.G.nodes
def __init__(self, channels: List[dict], default_capacity: TA = TA(1000)):
    """Build a token network pre-populated from channel descriptions.

    Each dict in ``channels`` names its participants by index
    (``participant1``/``participant2``) and may override the capacity of
    either side via ``capacity1``/``capacity2``.
    """
    super().__init__(
        token_network_address=TokenNetworkAddress(a(255)),
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
    )

    # open channels; identifiers are simply counted up from 100
    channel_ids = itertools.count(100)
    for chan in channels:
        participant1 = a(chan["participant1"])
        participant2 = a(chan["participant2"])
        self.handle_channel_opened_event(
            channel_identifier=ChannelID(next(channel_ids)),
            participant1=participant1,
            participant2=participant2,
        )
        # Set both directed channel views, falling back to the default capacity.
        view1: ChannelView = self.G[participant1][participant2]["view"]
        view1.capacity = chan.get("capacity1", default_capacity)
        view2: ChannelView = self.G[participant2][participant1]["view"]
        view2.capacity = chan.get("capacity2", default_capacity)

    # create reachability mapping for testing
    self.reachability_state = SimpleReachabilityContainer(
        {node: AddressReachability.REACHABLE for node in self.G.nodes}
    )
def test_channels(pathfinding_service_mock):
    """Exercise `upsert_channel`, `get_channels` and `delete_channel`."""
    # Participants need to be ordered
    parts = sorted([make_address(), make_address(), make_address()])
    token_network_address = TokenNetworkAddress(b"1" * 20)

    # register token network internally
    database = pathfinding_service_mock.database
    database.upsert_token_network(token_network_address, DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT)

    channel1 = Channel(
        token_network_address=token_network_address,
        channel_id=ChannelID(1),
        participant1=parts[0],
        participant2=parts[1],
    )
    channel2 = Channel(
        token_network_address=token_network_address,
        channel_id=ChannelID(2),
        participant1=parts[1],
        participant2=parts[2],
    )

    def stored_channel_ids():
        return [chan.channel_id for chan in database.get_channels()]

    # Test `upsert_channel` and `get_channels`
    database.upsert_channel(channel1)
    assert stored_channel_ids() == [channel1.channel_id]
    database.upsert_channel(channel2)
    assert stored_channel_ids() == [channel1.channel_id, channel2.channel_id]

    # Test `delete_channel`
    assert database.delete_channel(channel1.token_network_address, channel1.channel_id)
    assert stored_channel_ids() == [channel2.channel_id]
    # Deleting the same channel twice reports failure and changes nothing.
    assert not database.delete_channel(channel1.token_network_address, channel1.channel_id)
    assert stored_channel_ids() == [channel2.channel_id]
def test_waiting_messages(pathfinding_service_mock):
    """Waiting messages are popped exactly once per insert."""
    participant1_privkey, participant1 = make_privkey_address()
    token_network_address = TokenNetworkAddress(b"1" * 20)
    channel_id = ChannelID(1)

    # register token network internally
    database = pathfinding_service_mock.database
    database.conn.execute(
        "INSERT INTO token_network(address) VALUES (?)",
        [to_checksum_address(token_network_address)],
    )

    fee_update = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=token_network_address,
            channel_identifier=channel_id,
        ),
        updating_participant=participant1,
        fee_schedule=FeeScheduleState(),
        timestamp=datetime.utcnow(),
        signature=EMPTY_SIGNATURE,
    )
    fee_update.sign(LocalSigner(participant1_privkey))

    capacity_update = PFSCapacityUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=token_network_address,
            channel_identifier=channel_id,
        ),
        updating_participant=make_address(),
        other_participant=make_address(),
        updating_nonce=Nonce(1),
        other_nonce=Nonce(1),
        updating_capacity=TokenAmount(100),
        other_capacity=TokenAmount(111),
        reveal_timeout=BlockTimeout(50),
        signature=EMPTY_SIGNATURE,
    )
    capacity_update.sign(LocalSigner(participant1_privkey))

    for message in (fee_update, capacity_update):
        database.insert_waiting_message(message)

        # The stored message must come back exactly once...
        recovered = list(
            database.pop_waiting_messages(
                token_network_address=token_network_address, channel_id=channel_id
            )
        )
        assert len(recovered) == 1
        assert message == recovered[0]

        # ...and a second pop must be empty.
        recovered_again = list(
            database.pop_waiting_messages(
                token_network_address=token_network_address, channel_id=channel_id
            )
        )
        assert len(recovered_again) == 0
def test_tn_idempotency_of_channel_openings(
    token_network_model: TokenNetwork, addresses: List[Address]
):
    """Opening the same channel repeatedly must register it only once."""
    # create same channel 5 times
    for _ in range(5):
        token_network_model.handle_channel_opened_event(
            channel_identifier=ChannelID(1),
            participant1=addresses[0],
            participant2=addresses[1],
        )
    # there should only be one channel
    assert len(token_network_model.channel_id_to_addresses) == 1

    # now close the channel
    token_network_model.handle_channel_removed_event(channel_identifier=ChannelID(1))
    # there should be no channels
    assert len(token_network_model.channel_id_to_addresses) == 0
def setup_channel(pathfinding_service_mock, token_network_model):
    """Open one channel between the default participants via the event handler."""
    opened_event = ReceiveChannelOpenedEvent(
        token_network_address=token_network_model.address,
        channel_identifier=ChannelID(1),
        participant1=PARTICIPANT1,
        participant2=PARTICIPANT2,
        block_number=BlockNumber(1),
    )
    # Sanity-check the fixtures before mutating them.
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 0
    pathfinding_service_mock.handle_event(opened_event)
def test_pfs_rejects_capacity_update_with_wrong_channel_identifier(
    pathfinding_service_web3_mock: PathfindingService,
):
    # NOTE(review): despite the name, this sends a *fee* update via
    # `get_fee_update_message`/`on_fee_update` — looks copied from the
    # fee-update variant of this test; confirm intent.
    setup_channel(pathfinding_service_web3_mock)

    message = get_fee_update_message(
        channel_identifier=ChannelID(35),
        updating_participant=PRIVATE_KEY_1_ADDRESS,
        privkey_signer=PRIVATE_KEY_1,
    )

    # An update for an unknown channel identifier must be deferred.
    with pytest.raises(DeferMessage):
        pathfinding_service_web3_mock.on_fee_update(message)
def test_channel_settled_event_handler_leaves_existing_channel(context: Context):
    """A settle event for an unknown channel id leaves the stored channel intact."""
    context = setup_state_with_closed_channel(context)

    event = ReceiveChannelSettledEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=ChannelID(4),
        block_number=BlockNumber(52),
    )
    channel_settled_event_handler(event, context)

    assert context.database.channel_count() == 1
    assert_channel_state(context, ChannelState.CLOSED)
def test_channel_constraints(pathfinding_service_mock):
    """Regression test for https://github.com/raiden-network/raiden-services/issues/693"""
    # Participants need to be ordered
    parts = sorted([make_address(), make_address()])
    token_network_address1 = make_token_network_address()
    token_network_address2 = make_token_network_address()

    # register token network internally
    database = pathfinding_service_mock.database
    database.upsert_token_network(token_network_address1, DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT)
    database.upsert_token_network(token_network_address2, DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT)

    # Same channel id in two different token networks must be allowed.
    channel1 = Channel(
        token_network_address=token_network_address1,
        channel_id=ChannelID(1),
        participant1=parts[0],
        participant2=parts[1],
    )
    channel2 = Channel(
        token_network_address=token_network_address2,
        channel_id=ChannelID(1),
        participant1=parts[0],
        participant2=parts[1],
    )

    # Test `upsert_channel` and `get_channels`
    database.upsert_channel(channel1)
    assert [chan.channel_id for chan in database.get_channels()] == [channel1.channel_id]
    database.upsert_channel(channel2)
    assert [chan.channel_id for chan in database.get_channels()] == [
        channel1.channel_id,
        channel2.channel_id,
    ]
def test_tn_multiple_channels_for_two_participants_opened(
    token_network_model: TokenNetwork, addresses: List[Address]
):
    """Two channels between the same participant pair are tracked independently."""
    for identifier in (1, 2):
        token_network_model.handle_channel_opened_event(
            channel_identifier=ChannelID(identifier),
            participant1=addresses[0],
            participant2=addresses[1],
        )

    # now there should be two channels
    assert len(token_network_model.channel_id_to_addresses) == 2

    # now close one channel
    token_network_model.handle_channel_removed_event(channel_identifier=ChannelID(1))

    # there should be one channel left
    assert len(token_network_model.channel_id_to_addresses) == 1
def test_logging_processor():
    # test if our logging processor changes bytes to checksum addresses
    # even if bytes-addresses are entangled into events
    logger = Mock()
    log_method = Mock()

    # Plain bytes address in the event dict
    address = TokenAddress(b"\x7f[\xf6\xc9To\xa8\x185w\xe4\x9f\x15\xbc\xef@mr\xd5\xd9")
    address_log = format_to_hex(
        _logger=logger, _log_method=log_method, event_dict=dict(address=address)
    )
    assert to_checksum_address(address) == address_log["address"]

    # Addresses nested inside a dataclass event
    address2 = Address(b"\x7f[\xf6\xc9To\xa8\x185w\xe4\x9f\x15\xbc\xef@mr\xd5\xd1")
    event = ReceiveTokenNetworkCreatedEvent(
        token_address=address,
        token_network_address=TokenNetworkAddress(address2),
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        block_number=BlockNumber(1),
    )
    event_log = format_to_hex(
        _logger=logger, _log_method=log_method, event_dict=dict(event=event)
    )
    # pylint: disable=unsubscriptable-object
    assert to_checksum_address(address) == event_log["event"]["token_address"]
    assert to_checksum_address(address2) == event_log["event"]["token_network_address"]
    assert event_log["event"]["type_name"] == "ReceiveTokenNetworkCreatedEvent"

    # Addresses nested two levels deep inside a message
    message = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=TokenNetworkAddress(address),
            channel_identifier=ChannelID(1),
        ),
        updating_participant=PARTICIPANT1,
        fee_schedule=FeeScheduleState(),
        timestamp=datetime.utcnow(),
        signature=EMPTY_SIGNATURE,
    )
    message_log = format_to_hex(
        _logger=logger, _log_method=log_method, event_dict=dict(message=message)
    )
    assert (
        to_checksum_address(address)
        == message_log["message"]["canonical_identifier"]["token_network_address"]
    )
    assert message_log["message"]["type_name"] == "PFSFeeUpdate"
def test_purge_old_monitor_requests(
    ms_database: Database,
    build_request_monitoring,
    request_collector,
    monitoring_service: MonitoringService,
):
    """Stale monitor requests that don't match a known channel get purged."""
    # We'll test the purge on MRs for three different channels
    monitor_requests = [build_request_monitoring(channel_id=cid) for cid in (1, 2, 3)]
    for monitor_request in monitor_requests:
        request_collector.on_monitor_request(monitor_request)

    # Channel 1 exists in the db
    token_network_address = monitor_requests[0].balance_proof.token_network_address
    ms_database.conn.execute(
        "INSERT INTO token_network VALUES (?, ?)",
        [to_checksum_address(token_network_address), DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT],
    )
    ms_database.upsert_channel(
        Channel(
            identifier=ChannelID(1),
            token_network_address=token_network_address,
            participant1=Address(b"1" * 20),
            participant2=Address(b"2" * 20),
        )
    )

    # The request for channel 2 is recent (default), but the one for channel 3
    # has been added 16 minutes ago.
    saved_at = (datetime.utcnow() - timedelta(minutes=16)).timestamp()
    ms_database.conn.execute(
        """
        UPDATE monitor_request
        SET saved_at = ?
        WHERE channel_identifier = ?
        """,
        [saved_at, hex256(3)],
    )

    monitoring_service._purge_old_monitor_requests()  # pylint: disable=protected-access

    # Only the stale, channel-less MR (channel 3) must be gone.
    remaining_mrs = ms_database.conn.execute(
        """
        SELECT channel_identifier, waiting_for_channel
        FROM monitor_request ORDER BY channel_identifier
        """
    ).fetchall()
    assert [tuple(mr) for mr in remaining_mrs] == [(1, False), (2, True)]
def test_update_fee(order, pathfinding_service_mock, token_network_model):
    """Fee updates are applied whether they arrive before or after channel open."""
    metrics_state = save_metrics_state(metrics.REGISTRY)
    pathfinding_service_mock.database.insert(
        "token_network", dict(address=token_network_model.address)
    )

    # In the "normal" order the channel exists before the update arrives;
    # otherwise handling the update is expected to raise internally.
    exception_expected = order != "normal"
    if order == "normal":
        setup_channel(pathfinding_service_mock, token_network_model)

    fee_schedule = FeeScheduleState(
        flat=FeeAmount(1),
        proportional=ProportionalFeeAmount(int(0.1e9)),
        imbalance_penalty=[(TokenAmount(0), FeeAmount(0)), (TokenAmount(10), FeeAmount(10))],
    )
    fee_update = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=token_network_model.address,
            channel_identifier=ChannelID(1),
        ),
        updating_participant=PARTICIPANT1,
        fee_schedule=fee_schedule,
        timestamp=datetime.utcnow(),
        signature=EMPTY_SIGNATURE,
    )
    fee_update.sign(LocalSigner(PARTICIPANT1_PRIVKEY))
    pathfinding_service_mock.handle_message(fee_update)

    # Test for metrics having seen the processing of the message
    assert (
        metrics_state.get_delta(
            "messages_processing_duration_seconds_sum",
            labels={"message_type": "PFSFeeUpdate"},
        )
        > 0.0
    )
    assert metrics_state.get_delta(
        "messages_exceptions_total", labels={"message_type": "PFSFeeUpdate"}
    ) == float(exception_expected)

    if order == "fee_update_before_channel_open":
        setup_channel(pathfinding_service_mock, token_network_model)

    # Either way, the fee schedule must end up on the channel view.
    cv = token_network_model.G[PARTICIPANT1][PARTICIPANT2]["view"]
    for key in ("flat", "proportional", "imbalance_penalty"):
        assert getattr(cv.fee_schedule_sender, key) == getattr(fee_schedule, key)
def test_token_channel_opened(pathfinding_service_mock, token_network_model):
    """Open events for unknown token networks must be ignored."""
    setup_channel(pathfinding_service_mock, token_network_model)
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 1

    # Test invalid token network address
    channel_event = ReceiveChannelOpenedEvent(
        token_network_address=TokenNetworkAddress(bytes([2] * 20)),
        channel_identifier=ChannelID(1),
        participant1=PARTICIPANT1,
        participant2=PARTICIPANT2,
        block_number=BlockNumber(1),
    )
    pathfinding_service_mock.handle_event(channel_event)
    # Nothing may have changed.
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 1
def test_prometheus_event_handling_no_exceptions(pathfinding_service_mock_empty):
    """Successful event handling is timed and counts zero exceptions."""
    metrics_state = save_metrics_state(metrics.REGISTRY)
    pfs = pathfinding_service_mock_empty

    token_address = TokenAddress(bytes([1] * 20))
    token_network_address = TokenNetworkAddress(bytes([2] * 20))
    channel_id = ChannelID(1)
    p1 = Address(bytes([3] * 20))
    p2 = Address(bytes([4] * 20))
    events = [
        ReceiveTokenNetworkCreatedEvent(
            token_address=token_address,
            token_network_address=token_network_address,
            settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
            block_number=BlockNumber(1),
        ),
        ReceiveChannelOpenedEvent(
            token_network_address=token_network_address,
            channel_identifier=channel_id,
            participant1=p1,
            participant2=p2,
            block_number=BlockNumber(2),
        ),
    ]

    for event in events:
        pfs.handle_event(event)
        labels = {"event_type": event.__class__.__name__}

        # check that we have non-zero processing time for the events we created
        assert (
            metrics_state.get_delta("events_processing_duration_seconds_sum", labels=labels)
            > 0.0
        )
        # there should be no exception raised
        assert metrics_state.get_delta("events_exceptions_total", labels=labels) == 0.0
def test_channel_closed_event_handler_channel_not_in_database(context: Context):
    """Closing an unknown channel changes nothing and logs a state error metric."""
    metrics_state = save_metrics_state(metrics.REGISTRY)
    # only setup the token network without channels
    create_default_token_network(context)

    event = ReceiveChannelClosedEvent(
        token_network_address=DEFAULT_TOKEN_NETWORK_ADDRESS,
        channel_identifier=ChannelID(4),
        closing_participant=DEFAULT_PARTICIPANT2,
        block_number=BlockNumber(52),
    )
    assert context.database.channel_count() == 0
    channel_closed_event_handler(event, context)
    assert context.database.channel_count() == 0

    assert (
        metrics_state.get_delta(
            "events_log_errors_total", labels=metrics.ErrorCategory.STATE.to_label_dict()
        )
        == 1.0
    )
def request_monitoring_message(token_network, get_accounts, get_private_key) -> RequestMonitoring:
    """Build a RequestMonitoring: balance proof signed by c2, request signed by c1."""
    c1, c2 = get_accounts(2)

    balance_proof_c2 = HashedBalanceProof(
        channel_identifier=ChannelID(1),
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        chain_id=ChainID(61),
        nonce=Nonce(2),
        additional_hash="0x%064x" % 0,
        transferred_amount=TokenAmount(1),
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
        priv_key=get_private_key(c2),
    )
    return balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=TokenAmount(1),
        monitoring_service_contract_address=MonitoringServiceAddress(bytes([11] * 20)),
    )
def f(
    chain_id: ChainID = TEST_CHAIN_ID,
    amount: TokenAmount = TokenAmount(50),
    nonce: Nonce = Nonce(1),
    channel_id: ChannelID = ChannelID(1),
) -> RequestMonitoring:
    """Build a signed RequestMonitoring with the given balance-proof fields."""
    balance_proof = HashedBalanceProof(
        channel_identifier=channel_id,
        token_network_address=TokenNetworkAddress(b"1" * 20),
        chain_id=chain_id,
        nonce=nonce,
        additional_hash="",
        # NOTE(review): bytes([amount]) only accepts amounts < 256 — fine for
        # the defaults used in these tests.
        balance_hash=encode_hex(bytes([amount])),
        priv_key=PrivateKey(get_random_privkey()),
    )
    request_monitoring = balance_proof.get_request_monitoring(
        privkey=non_closing_privkey,
        reward_amount=TokenAmount(55),
        monitoring_service_contract_address=TEST_MSC_ADDRESS,
    )
    # usually not a property of RequestMonitoring, but added for convenience in these tests
    request_monitoring.non_closing_signer = to_checksum_address(non_closing_address)
    return request_monitoring
def test_edge_weight(addresses):  # pylint: disable=assigning-non-slot
    """Edge weight = base 1 + reuse penalty + fee penalty + no-refund penalty."""
    channel_id = ChannelID(1)
    participant1 = addresses[0]
    participant2 = addresses[1]
    capacity = TokenAmount(int(20 * 1e18))
    capacity_partner = TokenAmount(int(10 * 1e18))
    channel = Channel(
        token_network_address=TokenNetworkAddress(bytes([1])),
        channel_id=channel_id,
        participant1=participant1,
        participant2=participant2,
        capacity1=capacity,
        capacity2=capacity_partner,
    )
    view, view_partner = channel.views
    amount = PaymentAmount(int(1e18))  # one RDN

    def weight(visited, fee_penalty):
        return TokenNetwork.edge_weight(
            visited=visited,
            view=view,
            view_from_partner=view_partner,
            amount=amount,
            fee_penalty=fee_penalty,
        )

    # no penalty
    assert weight({}, 0) == 1

    # channel already used in a previous route
    assert weight({channel_id: 2}, 0) == 3

    # absolute fee
    view.fee_schedule_sender.flat = FeeAmount(int(0.03e18))
    assert weight({}, 100) == 4

    # relative fee
    view.fee_schedule_sender.flat = FeeAmount(0)
    view.fee_schedule_sender.proportional = ProportionalFeeAmount(int(0.01e6))
    assert weight({}, 100) == 2

    # partner has not enough capacity for refund (no_refund_weight) -> edge weight +1
    view_partner.capacity = TokenAmount(0)
    assert weight({}, 100) == 3
def populate_token_network_random(
    token_network_model: TokenNetwork, private_keys: List[str]
) -> None:
    """Open 300 pseudo-random channels with random deposits on both sides."""
    number_of_channels = 300
    # seed for pseudo-randomness from config constant, that changes from time to time
    random.seed(number_of_channels)

    for channel_id_int in range(number_of_channels):
        channel_id = ChannelID(channel_id_int)

        # pick two distinct participants at random
        private_key1, private_key2 = random.sample(private_keys, 2)
        address1 = private_key_to_address(private_key1)
        address2 = private_key_to_address(private_key2)
        token_network_model.handle_channel_opened_event(
            channel_identifier=channel_id,
            participant1=address1,
            participant2=address2,
        )

        # deposit to channels
        deposit1 = TokenAmount(random.randint(0, 1000))
        deposit2 = TokenAmount(random.randint(0, 1000))
        address1, address2 = token_network_model.channel_id_to_addresses[channel_id]
        # capacity update from participant 1's perspective
        token_network_model.handle_channel_balance_update_message(
            PFSCapacityUpdate(
                canonical_identifier=CanonicalIdentifier(
                    chain_identifier=ChainID(61),
                    channel_identifier=channel_id,
                    token_network_address=TokenNetworkAddress(token_network_model.address),
                ),
                updating_participant=address1,
                other_participant=address2,
                updating_nonce=Nonce(1),
                other_nonce=Nonce(1),
                updating_capacity=deposit1,
                other_capacity=deposit2,
                reveal_timeout=BlockTimeout(2),
                signature=EMPTY_SIGNATURE,
            ),
            updating_capacity_partner=TokenAmount(0),
            other_capacity_partner=TokenAmount(0),
        )
        # capacity update from participant 2's perspective
        token_network_model.handle_channel_balance_update_message(
            PFSCapacityUpdate(
                canonical_identifier=CanonicalIdentifier(
                    chain_identifier=ChainID(61),
                    channel_identifier=channel_id,
                    token_network_address=TokenNetworkAddress(token_network_model.address),
                ),
                updating_participant=address2,
                other_participant=address1,
                updating_nonce=Nonce(2),
                other_nonce=Nonce(1),
                updating_capacity=deposit2,
                other_capacity=deposit1,
                reveal_timeout=BlockTimeout(2),
                signature=EMPTY_SIGNATURE,
            ),
            updating_capacity_partner=TokenAmount(deposit1),
            other_capacity_partner=TokenAmount(deposit2),
        )
def populate_token_network(
    token_network: TokenNetwork,
    reachability_state: SimpleReachabilityContainer,
    addresses: List[Address],
    channel_descriptions: List,
):
    """Open channels and set capacities/reachabilities from the description tuples.

    Each description is
    (p1_index, p1_capacity, p1_fee, p1_reveal_timeout, p1_reachability,
     p2_index, p2_capacity, p2_fee, p2_reveal_timeout, p2_reachability);
    the channel id is the description's position in the list.
    """
    for (
        channel_id,
        (
            p1_index,
            p1_capacity,
            _p1_fee,
            p1_reveal_timeout,
            p1_reachability,
            p2_index,
            p2_capacity,
            _p2_fee,
            p2_reveal_timeout,
            p2_reachability,
        ),
    ) in enumerate(channel_descriptions):
        participant1 = addresses[p1_index]
        participant2 = addresses[p2_index]
        token_network.handle_channel_opened_event(
            channel_identifier=ChannelID(channel_id),
            participant1=participant1,
            participant2=participant2,
        )
        # capacity update from participant 1's perspective
        token_network.handle_channel_balance_update_message(
            PFSCapacityUpdate(
                canonical_identifier=CanonicalIdentifier(
                    chain_identifier=ChainID(61),
                    channel_identifier=ChannelID(channel_id),
                    token_network_address=TokenNetworkAddress(token_network.address),
                ),
                updating_participant=addresses[p1_index],
                other_participant=addresses[p2_index],
                updating_nonce=Nonce(1),
                other_nonce=Nonce(1),
                updating_capacity=p1_capacity,
                other_capacity=p2_capacity,
                reveal_timeout=p1_reveal_timeout,
                signature=EMPTY_SIGNATURE,
            ),
            updating_capacity_partner=TokenAmount(0),
            other_capacity_partner=TokenAmount(0),
        )
        # capacity update from participant 2's perspective
        token_network.handle_channel_balance_update_message(
            PFSCapacityUpdate(
                canonical_identifier=CanonicalIdentifier(
                    chain_identifier=ChainID(61),
                    channel_identifier=ChannelID(channel_id),
                    token_network_address=TokenNetworkAddress(token_network.address),
                ),
                updating_participant=addresses[p2_index],
                other_participant=addresses[p1_index],
                updating_nonce=Nonce(2),
                other_nonce=Nonce(1),
                updating_capacity=p2_capacity,
                other_capacity=p1_capacity,
                reveal_timeout=p2_reveal_timeout,
                signature=EMPTY_SIGNATURE,
            ),
            updating_capacity_partner=TokenAmount(p1_capacity),
            other_capacity_partner=TokenAmount(p2_capacity),
        )

        # Update presence state according to scenario
        reachability_state.reachabilities[participant1] = p1_reachability
        reachability_state.reachabilities[participant2] = p2_reachability
) from monitoring_service.states import ( Channel, HashedBalanceProof, MonitorRequest, OnChainUpdateStatus, ) from raiden_contracts.constants import ChannelState from raiden_contracts.utils.type_aliases import PrivateKey from raiden_libs.utils import private_key_to_address from tests.constants import TEST_CHAIN_ID, TEST_MSC_ADDRESS DEFAULT_TOKEN_NETWORK_ADDRESS = TokenNetworkAddress(bytes([1] * 20)) DEFAULT_TOKEN_ADDRESS = TokenAddress(bytes([9] * 20)) DEFAULT_CHANNEL_IDENTIFIER = ChannelID(3) DEFAULT_PRIVATE_KEY1 = PrivateKey(decode_hex("0x" + "1" * 64)) DEFAULT_PRIVATE_KEY2 = PrivateKey(decode_hex("0x" + "2" * 64)) DEFAULT_PARTICIPANT1 = private_key_to_address(DEFAULT_PRIVATE_KEY1) DEFAULT_PARTICIPANT2 = private_key_to_address(DEFAULT_PRIVATE_KEY2) DEFAULT_PRIVATE_KEY_OTHER = PrivateKey(decode_hex("0x" + "3" * 64)) DEFAULT_PARTICIPANT_OTHER = private_key_to_address(DEFAULT_PRIVATE_KEY_OTHER) DEFAULT_REWARD_AMOUNT = TokenAmount(1) DEFAULT_SETTLE_TIMEOUT = 100 * 15 # time in seconds def create_signed_monitor_request( chain_id: ChainID = TEST_CHAIN_ID, nonce: Nonce = Nonce(5), reward_amount: TokenAmount = DEFAULT_REWARD_AMOUNT, closing_privkey: PrivateKey = DEFAULT_PRIVATE_KEY1,
from eth_utils import decode_hex

from raiden_common.utils.typing import (
    ChainID,
    ChannelID,
    MonitoringServiceAddress,
    TokenNetworkAddress,
)
from raiden_libs.utils import private_key_to_address

KEYSTORE_FILE_NAME = "keystore.txt"
KEYSTORE_PASSWORD = "******"

TEST_MSC_ADDRESS = MonitoringServiceAddress(b"9" * 20)
TEST_CHAIN_ID = ChainID(131277322940537)

DEFAULT_TOKEN_NETWORK_ADDRESS = TokenNetworkAddress(
    decode_hex("0x6e46B62a245D9EE7758B8DdCCDD1B85fF56B9Bc9")
)
DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT = 1500  # time in seconds

# Deterministic keys/addresses shared by the tests
PRIVATE_KEY_1 = bytes([1] * 32)
PRIVATE_KEY_1_ADDRESS = private_key_to_address(PRIVATE_KEY_1)
PRIVATE_KEY_2 = bytes([2] * 32)
PRIVATE_KEY_2_ADDRESS = private_key_to_address(PRIVATE_KEY_2)
PRIVATE_KEY_3 = bytes([3] * 32)
PRIVATE_KEY_3_ADDRESS = private_key_to_address(PRIVATE_KEY_3)
DEFAULT_CHANNEL_ID = ChannelID(0)
def test_crash(
    tmpdir, get_accounts, get_private_key, mockchain
):  # pylint: disable=too-many-locals
    """Process blocks and compare results with/without crash

    A somewhat meaningful crash handling is simulated by not including the
    UpdatedHeadBlockEvent in every block: the "crashy" service is recreated
    from its database before each block, so any state it did not persist is
    lost, and its results must still match the continuously-running
    "stable" service.
    """
    # Fixed channel and two funded test accounts acting as the participants.
    channel_identifier = ChannelID(3)
    c1, c2 = get_accounts(2)
    token_network_address = TokenNetworkAddress(to_canonical_address(get_random_address()))
    # Balance proof signed by c1; used to derive the monitor request below.
    balance_proof = HashedBalanceProof(
        nonce=Nonce(1),
        transferred_amount=TokenAmount(2),
        priv_key=get_private_key(c1),
        channel_identifier=channel_identifier,
        token_network_address=token_network_address,
        chain_id=ChainID(61),
        additional_hash="0x%064x" % 0,
        locked_amount=0,
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
    )
    # Monitor request signed by the non-closing participant c2.
    monitor_request = balance_proof.get_monitor_request(
        get_private_key(c2), reward_amount=TokenAmount(0), msc_address=TEST_MSC_ADDRESS
    )

    # One inner list of events per mocked block. Note that blocks 0 and 2
    # intentionally lack an UpdatedHeadBlockEvent (crash simulation).
    events = [
        [
            ReceiveChannelOpenedEvent(
                token_network_address=token_network_address,
                channel_identifier=channel_identifier,
                participant1=c1,
                participant2=c2,
                block_number=BlockNumber(0),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(1))],
        [
            ActionMonitoringTriggeredEvent(
                token_network_address=token_network_address,
                channel_identifier=channel_identifier,
                non_closing_participant=c2,
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(3))],
    ]
    mockchain(events)

    server_private_key = PrivateKey(get_random_privkey())
    contracts = {
        CONTRACT_TOKEN_NETWORK_REGISTRY: ContractMock(),
        CONTRACT_MONITORING_SERVICE: ContractMock(),
        CONTRACT_USER_DEPOSIT: ContractMock(),
        CONTRACT_SERVICE_REGISTRY: ContractMock(),
    }

    def new_ms(filename):
        # Build a MonitoringService backed by the given SQLite file and
        # replace the monitoring contract with a Mock so `monitor` calls
        # can be recorded and compared between instances.
        ms = MonitoringService(
            web3=Web3Mock(),
            private_key=server_private_key,
            contracts=contracts,
            db_filename=os.path.join(tmpdir, filename),
            poll_interval=0,
            required_confirmations=BlockTimeout(0),
            sync_start_block=BlockNumber(0),
        )
        msc = Mock()
        ms.context.monitoring_service_contract = msc
        ms.monitor_mock = msc.functions.monitor.return_value.transact
        ms.monitor_mock.return_value = bytes(0)  # b"": dummy tx hash
        return ms

    # initialize both monitoring services
    stable_ms = new_ms("stable.db")
    crashy_ms = new_ms("crashy.db")
    for ms in [stable_ms, crashy_ms]:
        # mock database time to make results reproducible
        # NOTE(review): narg=1 is declared but the callable takes no
        # arguments — presumably CURRENT_TIMESTAMP is only ever invoked in a
        # form matching this override; confirm against the SQL used.
        ms.database.conn.create_function("CURRENT_TIMESTAMP", 1, lambda: "2000-01-01")
        # Seed the token network and the monitor request so both services
        # start from identical persisted state.
        ms.database.conn.execute(
            "INSERT INTO token_network (address, settle_timeout) VALUES (?, ?)",
            [to_checksum_address(token_network_address), DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT],
        )
        ms.context.ms_state.blockchain_state.token_network_addresses = [token_network_address]
        ms.database.upsert_monitor_request(monitor_request)
        ms.database.conn.commit()

    # process each block and compare results between crashy and stable ms
    for to_block in range(len(events)):
        crashy_ms = new_ms("crashy.db")  # new instance to simulate crash
        stable_ms.monitor_mock.reset_mock()  # clear calls from last block
        result_state: List[dict] = []
        for ms in [stable_ms, crashy_ms]:
            ms._process_new_blocks(BlockNumber(to_block))  # pylint: disable=protected-access
            # Snapshot everything that must agree between the instances:
            # in-memory blockchain state, full DB dump, and contract calls.
            result_state.append(
                dict(
                    blockchain_state=ms.context.ms_state.blockchain_state,
                    db_dump=list(ms.database.conn.iterdump()),
                    monitor_calls=ms.monitor_mock.mock_calls,
                )
            )

        # both instances should have the same state after processing
        for stable_state, crashy_state in zip(result_state[0].values(), result_state[1].values()):
            if isinstance(stable_state, BlockchainState):
                # Compare BlockchainState field by field instead of as a
                # whole so one field can be deliberately excluded (below).
                assert stable_state.chain_id == crashy_state.chain_id
                assert (
                    stable_state.token_network_registry_address
                    == crashy_state.token_network_registry_address
                )
                assert stable_state.latest_committed_block == crashy_state.latest_committed_block
                assert (
                    stable_state.monitor_contract_address == crashy_state.monitor_contract_address
                )
                # Do not compare `current_event_filter_interval`, this is allowed to be different
            else:
                assert stable_state == crashy_state
def test_crash(tmpdir, mockchain):  # pylint: disable=too-many-locals
    """Process blocks and compare results with/without crash

    A somewhat meaningful crash handling is simulated by not including the
    UpdatedHeadBlockEvent in every block: the "crashy" service is recreated
    from its database before each block, so unpersisted state is lost, and
    its results must still match the continuously-running "stable" service.
    """
    # Deterministic addresses/ids built from constant byte patterns.
    token_address = TokenAddress(bytes([1] * 20))
    token_network_address = TokenNetworkAddress(bytes([2] * 20))
    channel_id = ChannelID(1)
    p1 = Address(bytes([3] * 20))
    p2 = Address(bytes([4] * 20))
    # One inner list of events per mocked block; only blocks 1 and 3 carry
    # an UpdatedHeadBlockEvent (crash simulation, see docstring).
    events = [
        [
            ReceiveTokenNetworkCreatedEvent(
                token_address=token_address,
                token_network_address=token_network_address,
                settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
                block_number=BlockNumber(1),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(2))],
        [
            ReceiveChannelOpenedEvent(
                token_network_address=token_network_address,
                channel_identifier=channel_id,
                participant1=p1,
                participant2=p2,
                block_number=BlockNumber(3),
            )
        ],
        [UpdatedHeadBlockEvent(BlockNumber(4))],
    ]
    mockchain(events)

    server_private_key = PrivateKey(get_random_privkey())
    contracts = {
        CONTRACT_TOKEN_NETWORK_REGISTRY: ContractMock(),
        CONTRACT_USER_DEPOSIT: ContractMock(),
    }

    def new_service(filename):
        # Build a PathfindingService backed by the given SQLite file.
        service = PathfindingService(
            web3=Web3Mock(),
            private_key=server_private_key,
            contracts=contracts,
            sync_start_block=BlockNumber(0),
            required_confirmations=BlockTimeout(0),
            poll_interval=0,
            db_filename=os.path.join(tmpdir, filename),
        )
        return service

    # initialize stable service
    stable_service = new_service("stable.db")

    # process each block and compare results between crashy and stable service
    for to_block in range(len(events)):
        crashy_service = new_service("crashy.db")  # new instance to simulate crash
        result_state: List[dict] = []
        for service in [stable_service, crashy_service]:
            service._process_new_blocks(BlockNumber(to_block))  # pylint: disable=protected-access
            # Only the full DB dump is compared for the pathfinding service.
            result_state.append(dict(db_dump=list(service.database.conn.iterdump())))

        # both instances should have the same state after processing
        for stable_state, crashy_state in zip(result_state[0].values(), result_state[1].values()):
            if isinstance(stable_state, BlockchainState):
                # NOTE(review): this branch looks vestigial here — the result
                # dict above only contains `db_dump`, never a BlockchainState;
                # presumably copied from the monitoring-service variant of
                # this test. Confirm before relying on it.
                assert stable_state.chain_id == crashy_state.chain_id
                assert (
                    stable_state.token_network_registry_address
                    == crashy_state.token_network_registry_address
                )
                assert stable_state.latest_committed_block == crashy_state.latest_committed_block
                assert (
                    stable_state.monitor_contract_address == crashy_state.monitor_contract_address
                )
                # Do not compare `current_event_filter_interval`, this is allowed to be different
            else:
                assert stable_state == crashy_state

        crashy_service.database.conn.close()  # close the db connection so we can access it again