def test_logging_processor():
    # Test that our logging processor changes bytes to checksum addresses,
    # even when the bytes addresses are nested inside events and messages.
    logger = Mock()
    log_method = Mock()

    address = TokenAddress(b"\x7f[\xf6\xc9To\xa8\x185w\xe4\x9f\x15\xbc\xef@mr\xd5\xd9")
    address_log = format_to_hex(
        _logger=logger, _log_method=log_method, event_dict=dict(address=address)
    )
    assert to_checksum_address(address) == address_log["address"]

    address2 = Address(b"\x7f[\xf6\xc9To\xa8\x185w\xe4\x9f\x15\xbc\xef@mr\xd5\xd1")
    event = ReceiveTokenNetworkCreatedEvent(
        token_address=address,
        token_network_address=TokenNetworkAddress(address2),
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        block_number=BlockNumber(1),
    )
    event_log = format_to_hex(_logger=logger, _log_method=log_method, event_dict=dict(event=event))
    assert (  # pylint: disable=unsubscriptable-object
        to_checksum_address(address) == event_log["event"]["token_address"]
    )
    assert (  # pylint: disable=unsubscriptable-object
        to_checksum_address(address2) == event_log["event"]["token_network_address"]
    )
    assert (  # pylint: disable=unsubscriptable-object
        event_log["event"]["type_name"] == "ReceiveTokenNetworkCreatedEvent"
    )

    message = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=TokenNetworkAddress(address),
            channel_identifier=ChannelID(1),
        ),
        updating_participant=PARTICIPANT1,
        fee_schedule=FeeScheduleState(),
        timestamp=datetime.utcnow(),
        signature=EMPTY_SIGNATURE,
    )
    message_log = format_to_hex(
        _logger=logger, _log_method=log_method, event_dict=dict(message=message)
    )
    assert (  # pylint: disable=unsubscriptable-object
        to_checksum_address(address)
        == message_log["message"]["canonical_identifier"]["token_network_address"]
    )
    assert (  # pylint: disable=unsubscriptable-object
        message_log["message"]["type_name"] == "PFSFeeUpdate"
    )
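
# For reference, a structlog processor of this shape receives (logger,
# log_method, event_dict) and returns the transformed event_dict. The sketch
# below is a hypothetical stand-in for format_to_hex, assuming a recursive
# walk over the event_dict; the real processor additionally serializes
# events/messages into dicts carrying a "type_name" key, as the assertions
# above show.
from typing import Any, Dict

from eth_utils import to_checksum_address


def checksum_addresses_sketch(_logger: Any, _log_method: str, event_dict: Dict) -> Dict:
    def convert(value: Any) -> Any:
        # 20-byte values are treated as binary addresses and checksummed.
        if isinstance(value, bytes) and len(value) == 20:
            return to_checksum_address(value)
        if isinstance(value, dict):
            return {key: convert(item) for key, item in value.items()}
        if isinstance(value, (list, tuple)):
            return [convert(item) for item in value]
        return value

    return {key: convert(value) for key, value in event_dict.items()}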
def test_prometheus_event_handling_raise_exception(pathfinding_service_mock_empty):
    metrics_state = save_metrics_state(metrics.REGISTRY)
    pfs = pathfinding_service_mock_empty

    event = ReceiveTokenNetworkCreatedEvent(
        token_address=TokenAddress(bytes([1] * 20)),
        token_network_address=TokenNetworkAddress(bytes([2] * 20)),
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        block_number=BlockNumber(1),
    )

    pfs.handle_token_network_created = Mock(side_effect=KeyError())

    with pytest.raises(KeyError):
        pfs.handle_event(event)

    # Exceptions raised in the prometheus-wrapped part of the event handling
    # are currently not handled anywhere. We force one anyway and check that
    # it is logged correctly, since at some point higher up the call stack we
    # could catch exceptions.
    assert (
        metrics_state.get_delta(
            "events_exceptions_total",
            labels={"event_type": "ReceiveTokenNetworkCreatedEvent"},
        )
        == 1.0
    )
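
# The exception counter asserted above follows the standard prometheus_client
# pattern: increment a labelled Counter and re-raise. This is a minimal
# sketch, assuming a counter named events_exceptions_total; the services'
# real wrapper lives in their own metrics module, and the names here are
# illustrative only.
import functools

from prometheus_client import Counter

EVENTS_EXCEPTIONS_TOTAL = Counter(
    "events_exceptions_total",
    "Number of exceptions raised while handling an event",
    ["event_type"],
)


def count_exceptions(handler):
    # Increment the per-event-type counter when the handler raises, then
    # re-raise so the error still propagates to the caller (which is why the
    # test above still sees the KeyError via pytest.raises).
    @functools.wraps(handler)
    def wrapped(event):
        try:
            return handler(event)
        except Exception:
            EVENTS_EXCEPTIONS_TOTAL.labels(event_type=event.__class__.__name__).inc()
            raise

    return wrapped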
def test_feedback_stats(pathfinding_service_mock):
    token_network_address = TokenNetworkAddress(b"1" * 20)
    default_path = [b"1" * 20, b"2" * 20, b"3" * 20]
    feedback_token = FeedbackToken(token_network_address)
    database = pathfinding_service_mock.database
    estimated_fee = 0

    database.prepare_feedback(feedback_token, default_path, estimated_fee)
    assert database.get_num_routes_feedback() == 1
    assert database.get_num_routes_feedback(only_with_feedback=True) == 0
    assert database.get_num_routes_feedback(only_successful=True) == 0

    database.update_feedback(feedback_token, default_path, False)
    assert database.get_num_routes_feedback() == 1
    assert database.get_num_routes_feedback(only_with_feedback=True) == 1
    assert database.get_num_routes_feedback(only_successful=True) == 0

    default_path2 = default_path[1:]
    feedback_token2 = FeedbackToken(token_network_address)
    database.prepare_feedback(feedback_token2, default_path2, estimated_fee)
    assert database.get_num_routes_feedback() == 2
    assert database.get_num_routes_feedback(only_with_feedback=True) == 1
    assert database.get_num_routes_feedback(only_successful=True) == 0

    database.update_feedback(feedback_token2, default_path2, True)
    assert database.get_num_routes_feedback() == 2
    assert database.get_num_routes_feedback(only_with_feedback=True) == 2
    assert database.get_num_routes_feedback(only_successful=True) == 1
def __init__(self, channels: List[dict], default_capacity: TA = TA(1000)):
    super().__init__(
        token_network_address=TokenNetworkAddress(a(255)),
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
    )

    # open channels
    channel_ids = itertools.count(100)
    for chan in channels:
        self.handle_channel_opened_event(
            channel_identifier=ChannelID(next(channel_ids)),
            participant1=a(chan["participant1"]),
            participant2=a(chan["participant2"]),
        )
        cv1: ChannelView = self.G[a(chan["participant1"])][a(chan["participant2"])]["view"]
        cv1.capacity = chan.get("capacity1", default_capacity)
        cv2: ChannelView = self.G[a(chan["participant2"])][a(chan["participant1"])]["view"]
        cv2.capacity = chan.get("capacity2", default_capacity)

    # create reachability mapping for testing
    self.reachability_state = SimpleReachabilityContainer(
        {node: AddressReachability.REACHABLE for node in self.G.nodes}
    )
def test_save_and_load_token_networks(pathfinding_service_mock_empty):
    pfs = pathfinding_service_mock_empty

    token_address = TokenAddress(bytes([1] * 20))
    token_network_address = TokenNetworkAddress(bytes([2] * 20))
    channel_id = ChannelID(1)
    p1 = Address(bytes([3] * 20))
    p2 = Address(bytes([4] * 20))
    events = [
        ReceiveTokenNetworkCreatedEvent(
            token_address=token_address,
            token_network_address=token_network_address,
            settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
            block_number=BlockNumber(1),
        ),
        ReceiveChannelOpenedEvent(
            token_network_address=token_network_address,
            channel_identifier=channel_id,
            participant1=p1,
            participant2=p2,
            block_number=BlockNumber(2),
        ),
    ]
    for event in events:
        pfs.handle_event(event)
    assert len(pfs.token_networks) == 1

    loaded_networks = pfs._load_token_networks()  # pylint: disable=protected-access
    assert len(loaded_networks) == 1

    orig = list(pfs.token_networks.values())[0]
    loaded = list(loaded_networks.values())[0]
    assert loaded.address == orig.address
    assert loaded.channel_id_to_addresses == orig.channel_id_to_addresses
    assert loaded.G.nodes == orig.G.nodes
def get_feedback_token(
    self,
    token_id: UUID,
    token_network_address: TokenNetworkAddress,
    route: List[Address],
) -> Optional[FeedbackToken]:
    hexed_route = [to_checksum_address(e) for e in route]
    with self._cursor() as cursor:
        token = cursor.execute(
            """SELECT * FROM feedback
               WHERE token_id = ?
                 AND token_network_address = ?
                 AND route = ?;
            """,
            [
                token_id.hex,
                to_checksum_address(token_network_address),
                json.dumps(hexed_route),
            ],
        ).fetchone()

    if token:
        return FeedbackToken(
            token_network_address=TokenNetworkAddress(
                to_canonical_address(token["token_network_address"])
            ),
            uuid=UUID(token["token_id"]),
            creation_time=token["creation_time"],
        )
    return None
def test_insert_feedback_token(pathfinding_service_mock):
    token_network_address = TokenNetworkAddress(b"1" * 20)
    route = [Address(b"2" * 20), Address(b"3" * 20)]
    estimated_fee = -123

    token = FeedbackToken(token_network_address=token_network_address)
    database = pathfinding_service_mock.database

    database.prepare_feedback(token=token, route=route, estimated_fee=estimated_fee)

    # Test round-trip
    stored = database.get_feedback_token(
        token_id=token.uuid, token_network_address=token_network_address, route=route
    )
    assert stored == token

    # Test different UUID
    stored = database.get_feedback_token(
        token_id=uuid4(), token_network_address=token_network_address, route=route
    )
    assert stored is None

    # Test different token network address (vary only the address,
    # otherwise the lookup would fail for the wrong reason)
    token_network_address_wrong = TokenNetworkAddress(b"9" * 20)
    stored = database.get_feedback_token(
        token_id=token.uuid, token_network_address=token_network_address_wrong, route=route
    )
    assert stored is None

    # Test different route (vary only the route)
    route_wrong = [Address(b"2" * 20), Address(b"3" * 20), Address(b"4" * 20)]
    stored = database.get_feedback_token(
        token_id=token.uuid, token_network_address=token_network_address, route=route_wrong
    )
    assert stored is None

    # Test empty route
    stored = database.get_feedback_token(
        token_id=token.uuid, token_network_address=token_network_address, route=[]
    )
    assert stored is None
def test_waiting_messages(pathfinding_service_mock):
    participant1_privkey, participant1 = make_privkey_address()
    token_network_address = TokenNetworkAddress(b"1" * 20)
    channel_id = ChannelID(1)

    # register token network internally
    database = pathfinding_service_mock.database
    database.conn.execute(
        "INSERT INTO token_network(address) VALUES (?)",
        [to_checksum_address(token_network_address)],
    )

    fee_update = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=token_network_address,
            channel_identifier=channel_id,
        ),
        updating_participant=participant1,
        fee_schedule=FeeScheduleState(),
        timestamp=datetime.utcnow(),
        signature=EMPTY_SIGNATURE,
    )
    fee_update.sign(LocalSigner(participant1_privkey))

    capacity_update = PFSCapacityUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(61),
            token_network_address=token_network_address,
            channel_identifier=channel_id,
        ),
        updating_participant=make_address(),
        other_participant=make_address(),
        updating_nonce=Nonce(1),
        other_nonce=Nonce(1),
        updating_capacity=TokenAmount(100),
        other_capacity=TokenAmount(111),
        reveal_timeout=BlockTimeout(50),
        signature=EMPTY_SIGNATURE,
    )
    capacity_update.sign(LocalSigner(participant1_privkey))

    for message in (fee_update, capacity_update):
        database.insert_waiting_message(message)

        recovered_messages = list(
            database.pop_waiting_messages(
                token_network_address=token_network_address, channel_id=channel_id
            )
        )
        assert len(recovered_messages) == 1
        assert message == recovered_messages[0]

        recovered_messages2 = list(
            database.pop_waiting_messages(
                token_network_address=token_network_address, channel_id=channel_id
            )
        )
        assert len(recovered_messages2) == 0
def get_token_networks(self) -> Iterator[TokenNetwork]:
    with self._cursor() as cursor:
        for row in cursor.execute("SELECT address, settle_timeout FROM token_network"):
            yield TokenNetwork(
                token_network_address=TokenNetworkAddress(to_canonical_address(row[0])),
                settle_timeout=row[1],
            )
def get(  # pylint: disable=no-self-use
    self,
    token_network_address: str,
    source_address: str,
    target_address: Optional[str] = None,
) -> Tuple[dict, int]:
    request_count = 0
    responses = []
    for req in last_failed_requests:
        log.debug("Last Requests Values:", req=req)
        matches_params = is_same_address(
            token_network_address, req["token_network_address"]
        ) and is_same_address(source_address, req["source"])
        if target_address is not None:
            matches_params = matches_params and is_same_address(target_address, req["target"])

        if matches_params:
            request_count += 1
            responses.append(
                dict(source=req["source"], target=req["target"], routes=req["routes"])
            )

    decoded_target_address: Optional[Address] = None
    if target_address:
        decoded_target_address = to_canonical_address(target_address)
    feedback_routes = self.pathfinding_service.database.get_feedback_routes(
        TokenNetworkAddress(to_canonical_address(token_network_address)),
        to_canonical_address(source_address),
        decoded_target_address,
    )

    # Group routes by request (all routes of one request share the same `token_id`)
    grouped_routes: Dict[str, List[Dict]] = collections.defaultdict(list)
    for route in feedback_routes:
        grouped_routes[route["token_id"]].append(route)

    for requests in grouped_routes.values():
        routes = [
            {"path": route["route"], "estimated_fee": route["estimated_fee"]}
            for route in requests
        ]
        responses.append(
            {
                "source": requests[0]["source_address"],
                "target": requests[0]["target_address"],
                "routes": routes,
            }
        )
        request_count += 1

    return dict(request_count=request_count, responses=responses), 200
def create_scheduled_event(row: sqlite3.Row) -> ScheduledEvent:
    event_type = EVENT_ID_TYPE_MAP[row["event_type"]]
    sub_event = event_type(
        TokenNetworkAddress(to_canonical_address(row["token_network_address"])),
        row["channel_identifier"],
        row["non_closing_participant"],
    )
    return ScheduledEvent(trigger_timestamp=row["trigger_timestamp"], event=sub_event)
def _validate_token_network_argument(self, token_network_address: str) -> TokenNetwork:
    if not is_checksum_address(token_network_address):
        raise exceptions.InvalidTokenNetwork(
            msg="The token network needs to be given as a checksummed address",
            token_network=token_network_address,
        )

    token_network = self.pathfinding_service.get_token_network(
        TokenNetworkAddress(to_canonical_address(token_network_address))
    )
    if token_network is None:
        raise exceptions.UnsupportedTokenNetwork(token_network=token_network_address)

    return token_network
def test_scheduled_events(ms_database: Database):
    # Add token network used as foreign key
    token_network_address = TokenNetworkAddress(bytes([1] * 20))
    ms_database.conn.execute(
        "INSERT INTO token_network (address, settle_timeout) VALUES (?, ?)",
        [to_checksum_address(token_network_address), DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT],
    )

    event1 = ScheduledEvent(
        trigger_timestamp=23 * 15,
        event=ActionMonitoringTriggeredEvent(
            token_network_address=token_network_address,
            channel_identifier=ChannelID(1),
            non_closing_participant=Address(bytes([1] * 20)),
        ),
    )

    assert ms_database.scheduled_event_count() == 0
    ms_database.upsert_scheduled_event(event=event1)
    assert ms_database.scheduled_event_count() == 1

    event2 = ScheduledEvent(
        trigger_timestamp=24 * 15,
        event=ActionMonitoringTriggeredEvent(
            token_network_address=token_network_address,
            channel_identifier=ChannelID(1),
            non_closing_participant=Address(bytes([1] * 20)),
        ),
    )

    ms_database.upsert_scheduled_event(event2)
    assert ms_database.scheduled_event_count() == 2

    assert len(ms_database.get_scheduled_events(22 * 15)) == 0
    assert len(ms_database.get_scheduled_events(23 * 15)) == 1
    assert len(ms_database.get_scheduled_events(24 * 15)) == 2

    # Upserting an existing event must not create an additional row
    ms_database.upsert_scheduled_event(event1)
    assert ms_database.scheduled_event_count() == 2

    assert len(ms_database.get_scheduled_events(22 * 15)) == 0
    assert len(ms_database.get_scheduled_events(23 * 15)) == 1
    assert len(ms_database.get_scheduled_events(24 * 15)) == 2

    ms_database.remove_scheduled_event(event2)
    assert len(ms_database.get_scheduled_events(22 * 15)) == 0
    assert len(ms_database.get_scheduled_events(23 * 15)) == 1
    assert len(ms_database.get_scheduled_events(24 * 15)) == 1
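
# The upsert semantics asserted above (re-upserting event1 does not add a
# row) typically map to SQLite's ON CONFLICT clause. A self-contained sketch
# with a hypothetical schema and serialization; the real Database class uses
# its own tables and columns:
import sqlite3

_conn = sqlite3.connect(":memory:")
_conn.execute(
    """CREATE TABLE scheduled_events (
        trigger_timestamp INT NOT NULL,
        event BLOB NOT NULL,
        UNIQUE(trigger_timestamp, event)
    )"""
)


def upsert_scheduled_event_sketch(trigger_timestamp: int, event: bytes) -> None:
    # Identical (timestamp, event) pairs collapse onto the existing row.
    _conn.execute(
        "INSERT INTO scheduled_events(trigger_timestamp, event) VALUES (?, ?) "
        "ON CONFLICT(trigger_timestamp, event) DO NOTHING",
        [trigger_timestamp, event],
    )


upsert_scheduled_event_sketch(23 * 15, b"event1")
upsert_scheduled_event_sketch(23 * 15, b"event1")  # no new row
assert _conn.execute("SELECT COUNT(*) FROM scheduled_events").fetchone()[0] == 1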
def test_feedback_token_validity():
    token_network_address = TokenNetworkAddress(b"1" * 20)

    # Newly created token is valid
    valid_token = FeedbackToken(token_network_address=token_network_address)
    assert valid_token.is_valid()

    # Test expiry in is_valid
    invalid_token = FeedbackToken(
        creation_time=datetime.utcnow() - MAX_AGE_OF_FEEDBACK_REQUESTS - timedelta(seconds=1),
        token_network_address=token_network_address,
    )
    assert not invalid_token.is_valid()
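
# The expiry rule exercised above can be sketched as follows. This is a
# hypothetical reconstruction, not the actual pathfinding_service model;
# MAX_AGE_OF_FEEDBACK_REQUESTS is assumed to be a timedelta constant and is
# replaced by a stand-in here.
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from uuid import UUID, uuid4


@dataclass
class FeedbackTokenSketch:
    token_network_address: bytes
    uuid: UUID = field(default_factory=uuid4)
    creation_time: datetime = field(default_factory=datetime.utcnow)

    def is_valid(self) -> bool:
        # A token is valid while it is younger than the configured maximum age.
        max_age = timedelta(minutes=10)  # stand-in for MAX_AGE_OF_FEEDBACK_REQUESTS
        return datetime.utcnow() - self.creation_time <= max_age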
def test_pfs_rejects_fee_update_with_wrong_token_network_address(
    pathfinding_service_web3_mock: PathfindingService,
):
    setup_channel(pathfinding_service_web3_mock)

    message = get_fee_update_message(
        token_network_address=TokenNetworkAddress(to_canonical_address("0x" + "1" * 40)),
        updating_participant=PRIVATE_KEY_1_ADDRESS,
        privkey_signer=PRIVATE_KEY_1,
    )

    with pytest.raises(InvalidFeeUpdate) as exinfo:
        pathfinding_service_web3_mock.on_fee_update(message)
    assert "unknown token network" in str(exinfo.value)
def get_monitoring_blockchain_events(
    web3: Web3,
    monitor_contract_address: Optional[Address],
    from_block: BlockNumber,
    to_block: BlockNumber,
) -> List[Event]:
    if monitor_contract_address is None:
        return []

    monitoring_service_events = query_blockchain_events(
        web3=web3,
        contract_addresses=[monitor_contract_address],
        from_block=from_block,
        to_block=to_block,
    )

    events: List[Event] = []
    for event in monitoring_service_events:
        event_name = event["event"]
        block_number = event["blockNumber"]

        if event_name == MonitoringServiceEvent.NEW_BALANCE_PROOF_RECEIVED:
            events.append(
                ReceiveMonitoringNewBalanceProofEvent(
                    token_network_address=TokenNetworkAddress(
                        to_canonical_address(event["args"]["token_network_address"])
                    ),
                    channel_identifier=event["args"]["channel_identifier"],
                    reward_amount=event["args"]["reward_amount"],
                    nonce=event["args"]["nonce"],
                    ms_address=to_canonical_address(event["args"]["ms_address"]),
                    raiden_node_address=to_canonical_address(
                        event["args"]["raiden_node_address"]
                    ),
                    block_number=block_number,
                )
            )
        elif event_name == MonitoringServiceEvent.REWARD_CLAIMED:
            events.append(
                ReceiveMonitoringRewardClaimedEvent(
                    ms_address=to_canonical_address(event["args"]["ms_address"]),
                    amount=event["args"]["amount"],
                    reward_identifier=encode_hex(event["args"]["reward_identifier"]),
                    block_number=block_number,
                )
            )

    return events
def on_capacity_update(self, message: PFSCapacityUpdate) -> Channel:
    token_network = self._validate_pfs_capacity_update(message)
    log.debug("Received Capacity Update", message=message)
    self.database.upsert_capacity_update(message)

    updating_capacity_partner, other_capacity_partner = self.database.get_capacity_updates(
        updating_participant=message.other_participant,
        token_network_address=TokenNetworkAddress(
            message.canonical_identifier.token_network_address
        ),
        channel_id=message.canonical_identifier.channel_identifier,
    )
    return token_network.handle_channel_balance_update_message(
        message=message,
        updating_capacity_partner=updating_capacity_partner,
        other_capacity_partner=other_capacity_partner,
    )
def test_token_channel_opened(pathfinding_service_mock, token_network_model):
    setup_channel(pathfinding_service_mock, token_network_model)
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 1

    # An event for an unknown token network address must be ignored
    channel_event = ReceiveChannelOpenedEvent(
        token_network_address=TokenNetworkAddress(bytes([2] * 20)),
        channel_identifier=ChannelID(1),
        participant1=PARTICIPANT1,
        participant2=PARTICIPANT2,
        block_number=BlockNumber(1),
    )
    pathfinding_service_mock.handle_event(channel_event)
    assert len(pathfinding_service_mock.token_networks) == 1
    assert len(token_network_model.channel_id_to_addresses) == 1
def test_channels(pathfinding_service_mock):
    # Participants need to be ordered
    parts = sorted([make_address(), make_address(), make_address()])
    token_network_address = TokenNetworkAddress(b"1" * 20)

    # register token network internally
    database = pathfinding_service_mock.database
    database.upsert_token_network(token_network_address, DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT)

    channel1 = Channel(
        token_network_address=token_network_address,
        channel_id=ChannelID(1),
        participant1=parts[0],
        participant2=parts[1],
    )
    channel2 = Channel(
        token_network_address=token_network_address,
        channel_id=ChannelID(2),
        participant1=parts[1],
        participant2=parts[2],
    )

    # Test `upsert_channel` and `get_channels`
    database.upsert_channel(channel1)
    assert [chan.channel_id for chan in database.get_channels()] == [channel1.channel_id]
    database.upsert_channel(channel2)
    assert [chan.channel_id for chan in database.get_channels()] == [
        channel1.channel_id,
        channel2.channel_id,
    ]

    # Test `delete_channel`
    assert database.delete_channel(channel1.token_network_address, channel1.channel_id)
    assert [chan.channel_id for chan in database.get_channels()] == [channel2.channel_id]
    assert not database.delete_channel(channel1.token_network_address, channel1.channel_id)
    assert [chan.channel_id for chan in database.get_channels()] == [channel2.channel_id]
def test_prometheus_event_handling_no_exceptions(pathfinding_service_mock_empty):
    metrics_state = save_metrics_state(metrics.REGISTRY)
    pfs = pathfinding_service_mock_empty

    token_address = TokenAddress(bytes([1] * 20))
    token_network_address = TokenNetworkAddress(bytes([2] * 20))
    channel_id = ChannelID(1)
    p1 = Address(bytes([3] * 20))
    p2 = Address(bytes([4] * 20))
    events = [
        ReceiveTokenNetworkCreatedEvent(
            token_address=token_address,
            token_network_address=token_network_address,
            settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
            block_number=BlockNumber(1),
        ),
        ReceiveChannelOpenedEvent(
            token_network_address=token_network_address,
            channel_identifier=channel_id,
            participant1=p1,
            participant2=p2,
            block_number=BlockNumber(2),
        ),
    ]

    for event in events:
        pfs.handle_event(event)

        # check that we have non-zero processing time for the events we created
        assert (
            metrics_state.get_delta(
                "events_processing_duration_seconds_sum",
                labels={"event_type": event.__class__.__name__},
            )
            > 0.0
        )
        # there should be no exception raised
        assert (
            metrics_state.get_delta(
                "events_exceptions_total", labels={"event_type": event.__class__.__name__}
            )
            == 0.0
        )
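
# The duration metric asserted above follows the usual prometheus_client
# pattern of observing elapsed time per label. A minimal sketch, assuming a
# Histogram named events_processing_duration_seconds; the metric object and
# wrapper names here are hypothetical stand-ins for the services' own
# metrics module:
from prometheus_client import Histogram

EVENTS_PROCESSING_DURATION = Histogram(
    "events_processing_duration_seconds",
    "Time spent handling an event",
    ["event_type"],
)


def handle_event_timed(pfs, event):
    # .time() observes the elapsed wall-clock time into the histogram, which
    # feeds the *_sum series checked by the test above.
    with EVENTS_PROCESSING_DURATION.labels(event_type=event.__class__.__name__).time():
        pfs.handle_event(event)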
def test_token_network_created(pathfinding_service_mock):
    token_address = TokenAddress(bytes([1] * 20))
    token_network_address = TokenNetworkAddress(bytes([2] * 20))

    network_event = ReceiveTokenNetworkCreatedEvent(
        token_address=token_address,
        token_network_address=token_network_address,
        settle_timeout=DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        block_number=BlockNumber(1),
    )

    assert not pathfinding_service_mock.follows_token_network(token_network_address)
    assert len(pathfinding_service_mock.token_networks) == 1

    pathfinding_service_mock.handle_event(network_event)
    assert pathfinding_service_mock.follows_token_network(token_network_address)
    assert len(pathfinding_service_mock.token_networks) == 2

    # Test idempotency
    pathfinding_service_mock.handle_event(network_event)
    assert pathfinding_service_mock.follows_token_network(token_network_address)
    assert len(pathfinding_service_mock.token_networks) == 2
def test_feedback(pathfinding_service_mock):
    token_network_address = TokenNetworkAddress(b"1" * 20)
    route = [Address(b"2" * 20), Address(b"3" * 20)]
    other_route = [Address(b"2" * 20), Address(b"4" * 20)]
    estimated_fee = 0

    token = FeedbackToken(token_network_address=token_network_address)
    other_token = FeedbackToken(token_network_address=token_network_address)
    database = pathfinding_service_mock.database

    assert not db_has_feedback_for(database=database, token=token, route=route)
    assert not db_has_feedback_for(database=database, token=token, route=other_route)
    assert not db_has_feedback_for(database=database, token=other_token, route=route)

    database.prepare_feedback(token=token, route=route, estimated_fee=estimated_fee)
    assert not db_has_feedback_for(database=database, token=token, route=route)
    assert not db_has_feedback_for(database=database, token=other_token, route=route)
    assert not db_has_feedback_for(database=database, token=token, route=other_route)

    rowcount = database.update_feedback(token=token, route=route, successful=True)
    assert rowcount == 1
    assert db_has_feedback_for(database=database, token=token, route=route)
    assert not db_has_feedback_for(database=database, token=other_token, route=route)
    assert not db_has_feedback_for(database=database, token=token, route=other_route)

    # Feedback is only recorded once per token/route pair
    rowcount = database.update_feedback(token=token, route=route, successful=True)
    assert rowcount == 0
def request_monitoring_message(token_network, get_accounts, get_private_key) -> RequestMonitoring:
    c1, c2 = get_accounts(2)

    balance_proof_c2 = HashedBalanceProof(
        channel_identifier=ChannelID(1),
        token_network_address=TokenNetworkAddress(to_canonical_address(token_network.address)),
        chain_id=ChainID(61),
        nonce=Nonce(2),
        additional_hash="0x%064x" % 0,
        transferred_amount=TokenAmount(1),
        locked_amount=TokenAmount(0),
        locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
        priv_key=get_private_key(c2),
    )

    return balance_proof_c2.get_request_monitoring(
        privkey=get_private_key(c1),
        reward_amount=TokenAmount(1),
        monitoring_service_contract_address=MonitoringServiceAddress(bytes([11] * 20)),
    )
def f(
    chain_id: ChainID = TEST_CHAIN_ID,
    amount: TokenAmount = TokenAmount(50),
    nonce: Nonce = Nonce(1),
    channel_id: ChannelID = ChannelID(1),
) -> RequestMonitoring:
    balance_proof = HashedBalanceProof(
        channel_identifier=channel_id,
        token_network_address=TokenNetworkAddress(b"1" * 20),
        chain_id=chain_id,
        nonce=nonce,
        additional_hash="",
        balance_hash=encode_hex(bytes([amount])),
        priv_key=PrivateKey(get_random_privkey()),
    )
    request_monitoring = balance_proof.get_request_monitoring(
        privkey=non_closing_privkey,
        reward_amount=TokenAmount(55),
        monitoring_service_contract_address=TEST_MSC_ADDRESS,
    )

    # usually not a property of RequestMonitoring, but added for convenience in these tests
    request_monitoring.non_closing_signer = to_checksum_address(non_closing_address)
    return request_monitoring
def test_edge_weight(addresses):
    # pylint: disable=assigning-non-slot
    channel_id = ChannelID(1)
    participant1 = addresses[0]
    participant2 = addresses[1]
    capacity = TokenAmount(int(20 * 1e18))
    capacity_partner = TokenAmount(int(10 * 1e18))
    channel = Channel(
        token_network_address=TokenNetworkAddress(bytes([1] * 20)),
        channel_id=channel_id,
        participant1=participant1,
        participant2=participant2,
        capacity1=capacity,
        capacity2=capacity_partner,
    )
    view, view_partner = channel.views
    amount = PaymentAmount(int(1e18))  # one RDN

    # no penalty
    assert (
        TokenNetwork.edge_weight(
            visited={},
            view=view,
            view_from_partner=view_partner,
            amount=amount,
            fee_penalty=0,
        )
        == 1
    )

    # channel already used in a previous route
    assert (
        TokenNetwork.edge_weight(
            visited={channel_id: 2},
            view=view,
            view_from_partner=view_partner,
            amount=amount,
            fee_penalty=0,
        )
        == 3
    )

    # absolute fee
    view.fee_schedule_sender.flat = FeeAmount(int(0.03e18))
    assert (
        TokenNetwork.edge_weight(
            visited={},
            view=view,
            view_from_partner=view_partner,
            amount=amount,
            fee_penalty=100,
        )
        == 4
    )

    # relative fee
    view.fee_schedule_sender.flat = FeeAmount(0)
    view.fee_schedule_sender.proportional = ProportionalFeeAmount(int(0.01e6))
    assert (
        TokenNetwork.edge_weight(
            visited={},
            view=view,
            view_from_partner=view_partner,
            amount=amount,
            fee_penalty=100,
        )
        == 2
    )

    # partner does not have enough capacity for a refund (no_refund_weight) -> edge weight +1
    view_partner.capacity = TokenAmount(0)
    assert (
        TokenNetwork.edge_weight(
            visited={},
            view=view,
            view_from_partner=view_partner,
            amount=amount,
            fee_penalty=100,
        )
        == 3
    )
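
# The expected values above are consistent with a weight of the following
# shape. This is a reconstruction from the assertions, not the actual
# TokenNetwork.edge_weight source; all names here are hypothetical.
def edge_weight_sketch(
    visited: dict,
    channel_id: int,
    fee: int,
    amount: int,
    fee_penalty: float,
    partner_can_refund: bool,
) -> float:
    # Base cost of one hop, plus a diversity penalty for channels already
    # used in earlier routes, plus the fee expressed as a fraction of the
    # payment amount scaled by fee_penalty, plus one if the partner lacks
    # the capacity to route a refund.
    diversity_penalty = visited.get(channel_id, 0)
    fee_weight = fee / amount * fee_penalty
    no_refund_weight = 0 if partner_can_refund else 1
    return 1 + diversity_penalty + fee_weight + no_refund_weight


# Spot-check against the cases above: a flat fee of 0.03e18 on an amount of
# 1e18 with fee_penalty=100 contributes 3, giving a total weight of 4.
assert edge_weight_sketch({}, 1, int(0.03e18), int(1e18), 100, True) == 4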
def populate_token_network_random(
    token_network_model: TokenNetwork, private_keys: List[str]
) -> None:
    number_of_channels = 300
    # seed the pseudo-random generator from a config constant (which changes from time to time)
    random.seed(number_of_channels)

    for channel_id_int in range(number_of_channels):
        channel_id = ChannelID(channel_id_int)

        private_key1, private_key2 = random.sample(private_keys, 2)
        address1 = private_key_to_address(private_key1)
        address2 = private_key_to_address(private_key2)
        token_network_model.handle_channel_opened_event(
            channel_identifier=channel_id,
            participant1=address1,
            participant2=address2,
        )

        # deposit to channels
        deposit1 = TokenAmount(random.randint(0, 1000))
        deposit2 = TokenAmount(random.randint(0, 1000))
        address1, address2 = token_network_model.channel_id_to_addresses[channel_id]
        token_network_model.handle_channel_balance_update_message(
            PFSCapacityUpdate(
                canonical_identifier=CanonicalIdentifier(
                    chain_identifier=ChainID(61),
                    channel_identifier=channel_id,
                    token_network_address=TokenNetworkAddress(token_network_model.address),
                ),
                updating_participant=address1,
                other_participant=address2,
                updating_nonce=Nonce(1),
                other_nonce=Nonce(1),
                updating_capacity=deposit1,
                other_capacity=deposit2,
                reveal_timeout=BlockTimeout(2),
                signature=EMPTY_SIGNATURE,
            ),
            updating_capacity_partner=TokenAmount(0),
            other_capacity_partner=TokenAmount(0),
        )
        token_network_model.handle_channel_balance_update_message(
            PFSCapacityUpdate(
                canonical_identifier=CanonicalIdentifier(
                    chain_identifier=ChainID(61),
                    channel_identifier=channel_id,
                    token_network_address=TokenNetworkAddress(token_network_model.address),
                ),
                updating_participant=address2,
                other_participant=address1,
                updating_nonce=Nonce(2),
                other_nonce=Nonce(1),
                updating_capacity=deposit2,
                other_capacity=deposit1,
                reveal_timeout=BlockTimeout(2),
                signature=EMPTY_SIGNATURE,
            ),
            updating_capacity_partner=TokenAmount(deposit1),
            other_capacity_partner=TokenAmount(deposit2),
        )
def populate_token_network(
    token_network: TokenNetwork,
    reachability_state: SimpleReachabilityContainer,
    addresses: List[Address],
    channel_descriptions: List,
):
    for (
        channel_id,
        (
            p1_index,
            p1_capacity,
            _p1_fee,
            p1_reveal_timeout,
            p1_reachability,
            p2_index,
            p2_capacity,
            _p2_fee,
            p2_reveal_timeout,
            p2_reachability,
        ),
    ) in enumerate(channel_descriptions):
        participant1 = addresses[p1_index]
        participant2 = addresses[p2_index]
        token_network.handle_channel_opened_event(
            channel_identifier=ChannelID(channel_id),
            participant1=participant1,
            participant2=participant2,
        )
        token_network.handle_channel_balance_update_message(
            PFSCapacityUpdate(
                canonical_identifier=CanonicalIdentifier(
                    chain_identifier=ChainID(61),
                    channel_identifier=ChannelID(channel_id),
                    token_network_address=TokenNetworkAddress(token_network.address),
                ),
                updating_participant=addresses[p1_index],
                other_participant=addresses[p2_index],
                updating_nonce=Nonce(1),
                other_nonce=Nonce(1),
                updating_capacity=p1_capacity,
                other_capacity=p2_capacity,
                reveal_timeout=p1_reveal_timeout,
                signature=EMPTY_SIGNATURE,
            ),
            updating_capacity_partner=TokenAmount(0),
            other_capacity_partner=TokenAmount(0),
        )
        token_network.handle_channel_balance_update_message(
            PFSCapacityUpdate(
                canonical_identifier=CanonicalIdentifier(
                    chain_identifier=ChainID(61),
                    channel_identifier=ChannelID(channel_id),
                    token_network_address=TokenNetworkAddress(token_network.address),
                ),
                updating_participant=addresses[p2_index],
                other_participant=addresses[p1_index],
                updating_nonce=Nonce(2),
                other_nonce=Nonce(1),
                updating_capacity=p2_capacity,
                other_capacity=p1_capacity,
                reveal_timeout=p2_reveal_timeout,
                signature=EMPTY_SIGNATURE,
            ),
            updating_capacity_partner=TokenAmount(p1_capacity),
            other_capacity_partner=TokenAmount(p2_capacity),
        )

        # Update presence state according to scenario
        reachability_state.reachabilities[participant1] = p1_reachability
        reachability_state.reachabilities[participant2] = p2_reachability
def token_network_model() -> TokenNetwork:
    return TokenNetwork(
        TokenNetworkAddress(bytes([1] * 20)), DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT
    )
from eth_utils import decode_hex
from raiden_common.utils.typing import (
    ChainID,
    ChannelID,
    MonitoringServiceAddress,
    TokenNetworkAddress,
)

from raiden_libs.utils import private_key_to_address

KEYSTORE_FILE_NAME = "keystore.txt"
KEYSTORE_PASSWORD = "******"

TEST_MSC_ADDRESS = MonitoringServiceAddress(b"9" * 20)
TEST_CHAIN_ID = ChainID(131277322940537)

DEFAULT_TOKEN_NETWORK_ADDRESS = TokenNetworkAddress(
    decode_hex("0x6e46B62a245D9EE7758B8DdCCDD1B85fF56B9Bc9")
)
DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT = 1500  # time in seconds

PRIVATE_KEY_1 = bytes([1] * 32)
PRIVATE_KEY_1_ADDRESS = private_key_to_address(PRIVATE_KEY_1)
PRIVATE_KEY_2 = bytes([2] * 32)
PRIVATE_KEY_2_ADDRESS = private_key_to_address(PRIVATE_KEY_2)
PRIVATE_KEY_3 = bytes([3] * 32)
PRIVATE_KEY_3_ADDRESS = private_key_to_address(PRIVATE_KEY_3)

DEFAULT_CHANNEL_ID = ChannelID(0)
def test_pfs_with_mocked_client(  # pylint: disable=too-many-arguments
    web3,
    token_network_registry_contract,
    channel_descriptions_case_1: List,
    get_accounts,
    user_deposit_contract,
    token_network,
    custom_token,
    create_channel,
    get_private_key,
):  # pylint: disable=too-many-locals
    """Instantiates some MockClients and the PathfindingService.

    Mocks blockchain events to set up a token network with a given topology,
    specified in the `channel_descriptions_case_1` fixture. Tests all PFS
    methods against that topology.
    """
    clients = get_accounts(7)
    token_network_address = TokenNetworkAddress(to_canonical_address(token_network.address))

    with patch("pathfinding_service.service.MatrixListener", new=Mock):
        pfs = PathfindingService(
            web3=web3,
            contracts={
                CONTRACT_TOKEN_NETWORK_REGISTRY: token_network_registry_contract,
                CONTRACT_USER_DEPOSIT: user_deposit_contract,
            },
            required_confirmations=BlockTimeout(1),
            db_filename=":memory:",
            poll_interval=0.1,
            sync_start_block=BlockNumber(0),
            private_key=PrivateKey(
                decode_hex("3a1076bf45ab87712ad64ccb3b10217737f7faacbf2872e88fdd9a537d8fe266")
            ),
        )

    # greenlet needs to be started and context switched to
    pfs.start()
    pfs.updated.wait(timeout=5)

    # there should be one token network registered
    assert len(pfs.token_networks) == 1

    token_network_model = pfs.token_networks[token_network_address]
    graph = token_network_model.G
    channel_identifiers = []
    for (
        p1_index,
        p1_capacity,
        _p1_fee,
        _p1_reveal_timeout,
        _p1_reachability,
        p2_index,
        p2_capacity,
        _p2_fee,
        _p2_reveal_timeout,
        _p2_reachability,
    ) in channel_descriptions_case_1:
        # order is important here because we check order later
        channel_id = create_channel(clients[p1_index], clients[p2_index])[0]
        channel_identifiers.append(channel_id)

        for address, partner_address, amount in [
            (clients[p1_index], clients[p2_index], p1_capacity),
            (clients[p2_index], clients[p1_index], p2_capacity),
        ]:
            if amount == 0:
                continue
            custom_token.functions.mint(amount).transact({"from": address})
            custom_token.functions.approve(token_network.address, amount).transact(
                {"from": address}
            )
            token_network.functions.setTotalDeposit(
                channel_id, address, amount, partner_address
            ).transact({"from": address})

        web3.testing.mine(1)  # 1 confirmation block
        pfs.updated.wait(timeout=5)

    # there should be as many open channels as described
    assert len(token_network_model.channel_id_to_addresses.keys()) == len(
        channel_descriptions_case_1
    )

    # check that deposits, settle_timeout and transfers got registered
    for index in range(len(channel_descriptions_case_1)):
        channel_identifier = channel_identifiers[index]
        p1_address, p2_address = token_network_model.channel_id_to_addresses[channel_identifier]
        view1: ChannelView = graph[p1_address][p2_address]["view"]
        view2: ChannelView = graph[p2_address][p1_address]["view"]
        assert view1.reveal_timeout == DEFAULT_REVEAL_TIMEOUT
        assert view2.reveal_timeout == DEFAULT_REVEAL_TIMEOUT

    # now close all channels
    for (
        index,
        (
            p1_index,
            _p1_capacity,
            _p1_fee,
            _p1_reveal_timeout,
            _p1_reachability,
            p2_index,
            _p2_capacity,
            _p2_fee,
            _p2_reveal_timeout,
            _p2_reachability,
        ),
    ) in enumerate(channel_descriptions_case_1):
        channel_id = channel_identifiers[index]
        balance_proof = HashedBalanceProof(
            nonce=Nonce(1),
            transferred_amount=0,
            priv_key=get_private_key(clients[p2_index]),
            channel_identifier=channel_id,
            token_network_address=TokenNetworkAddress(
                to_canonical_address(token_network.address)
            ),
            chain_id=TEST_CHAIN_ID,
            additional_hash="0x%064x" % 0,
            locked_amount=0,
            locksroot=encode_hex(LOCKSROOT_OF_NO_LOCKS),
        )
        token_network.functions.closeChannel(
            channel_id,
            clients[p2_index],
            clients[p1_index],
            balance_proof.balance_hash,
            balance_proof.nonce,
            balance_proof.additional_hash,
            balance_proof.signature,
            balance_proof.get_counter_signature(get_private_key(clients[p1_index])),
        ).transact({"from": clients[p1_index], "gas": 200_000})

        web3.testing.mine(1)  # 1 confirmation block
        pfs.updated.wait(timeout=5)

    # there should be no channels left
    assert len(token_network_model.channel_id_to_addresses.keys()) == 0

    pfs.stop()