def get_fee_update_message(  # pylint: disable=too-many-arguments
    updating_participant: Address,
    chain_id=ChainID(61),
    channel_identifier=DEFAULT_CHANNEL_ID,
    token_network_address: TokenNetworkAddress = DEFAULT_TOKEN_NETWORK_ADDRESS,
    fee_schedule: FeeScheduleState = None,
    timestamp: datetime = None,
    privkey_signer: bytes = PRIVATE_KEY_1,
) -> PFSFeeUpdate:
    """Build and sign a ``PFSFeeUpdate`` test message.

    Fix: the original signature used ``timestamp=datetime.utcnow()`` and
    ``fee_schedule=FeeScheduleState(...)`` as defaults.  Python evaluates
    default expressions once, at function-definition time, so every call
    relying on the defaults received the same import-time timestamp and
    shared a single (mutable) fee-schedule instance.  Both defaults are now
    ``None`` sentinels replaced per call; callers passing explicit values
    see no change.
    """
    if fee_schedule is None:
        # Fresh instance per call — same values the old default carried.
        fee_schedule = FeeScheduleState(
            cap_fees=True, flat=FeeAmount(1), proportional=ProportionalFeeAmount(1)
        )
    if timestamp is None:
        # Fresh timestamp per call instead of one frozen at import time.
        timestamp = datetime.utcnow()
    fee_message = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=chain_id,
            channel_identifier=channel_identifier,
            token_network_address=token_network_address,
        ),
        updating_participant=updating_participant,
        fee_schedule=fee_schedule,
        timestamp=timestamp,
        signature=EMPTY_SIGNATURE,
    )
    fee_message.sign(LocalSigner(privkey_signer))
    return fee_message
def test_waiting_messages(pathfinding_service_mock):
    """Each waiting message can be inserted and then popped exactly once."""
    privkey, participant = make_privkey_address()
    token_network_address = TokenNetworkAddress(b"1" * 20)
    channel_id = ChannelID(1)

    # register token network internally
    database = pathfinding_service_mock.database
    database.conn.execute(
        "INSERT INTO token_network(address) VALUES (?)",
        [to_checksum_address(token_network_address)],
    )

    # One message of each waiting-message flavour, signed by the same key.
    fee_update = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(1),
            token_network_address=token_network_address,
            channel_identifier=channel_id,
        ),
        updating_participant=participant,
        fee_schedule=FeeScheduleState(),
        timestamp=datetime.utcnow(),
        signature=EMPTY_SIGNATURE,
    )
    fee_update.sign(LocalSigner(privkey))

    capacity_update = PFSCapacityUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(1),
            token_network_address=token_network_address,
            channel_identifier=channel_id,
        ),
        updating_participant=make_address(),
        other_participant=make_address(),
        updating_nonce=Nonce(1),
        other_nonce=Nonce(1),
        updating_capacity=TokenAmount(100),
        other_capacity=TokenAmount(111),
        reveal_timeout=50,
        signature=EMPTY_SIGNATURE,
    )
    capacity_update.sign(LocalSigner(privkey))

    for waiting_message in (fee_update, capacity_update):
        database.insert_waiting_message(waiting_message)

        # First pop returns exactly the message we stored …
        popped = list(
            database.pop_waiting_messages(
                token_network_address=token_network_address, channel_id=channel_id
            )
        )
        assert popped == [waiting_message]

        # … and a second pop finds the queue empty again.
        popped_again = list(
            database.pop_waiting_messages(
                token_network_address=token_network_address, channel_id=channel_id
            )
        )
        assert popped_again == []
def test_fee_update():
    """A signed PFSFeeUpdate survives a DictSerializer round trip unchanged."""
    state = factories.create(factories.NettingChannelStateProperties())
    fee_update = PFSFeeUpdate.from_channel_state(state)
    fee_update.sign(signer)
    round_tripped = DictSerializer.deserialize(DictSerializer.serialize(fee_update))
    assert fee_update == round_tripped
def send_pfs_update(
    raiden: "RaidenService",
    canonical_identifier: CanonicalIdentifier,
    update_fee_schedule: bool = False,
) -> None:
    """Broadcast this channel's capacity — and optionally its fee schedule —
    to the pathfinding-service room.

    Does nothing when routing is private or the channel cannot be found.
    """
    # Private routing must not leak channel data to public PFS rooms.
    if raiden.routing_mode == RoutingMode.PRIVATE:
        return

    chain_state = views.state_from_raiden(raiden)
    channel_state = views.get_channelstate_by_canonical_identifier(
        chain_state=chain_state, canonical_identifier=canonical_identifier
    )
    if channel_state is None:
        return

    capacity_msg = PFSCapacityUpdate.from_channel_state(channel_state)
    capacity_msg.sign(raiden.signer)
    raiden.transport.send_global(constants.PATH_FINDING_BROADCASTING_ROOM, capacity_msg)
    log.debug("Sent a PFS Capacity Update", message=capacity_msg, channel_state=channel_state)

    if not update_fee_schedule:
        return
    fee_msg = PFSFeeUpdate.from_channel_state(channel_state)
    fee_msg.sign(raiden.signer)
    raiden.transport.send_global(constants.PATH_FINDING_BROADCASTING_ROOM, fee_msg)
    log.debug("Sent a PFS Fee Update", message=fee_msg, channel_state=channel_state)
def set_fee(self, node1: int, node2: int, **fee_params):
    """Apply a fee-schedule update on the channel between two graph nodes.

    ``fee_params`` are forwarded verbatim to ``RaidenFeeSchedule``.
    """
    edge_view = self.G[a(node1)][a(node2)]["view"]
    update = PFSFeeUpdate(
        canonical_identifier=CanonicalIdentifier(
            chain_identifier=ChainID(1),
            token_network_address=self.address,
            channel_identifier=edge_view.channel_id,
        ),
        updating_participant=a(node1),
        fee_schedule=RaidenFeeSchedule(**fee_params),
        signature=EMPTY_SIGNATURE,
        timestamp=datetime.now(timezone.utc),
    )
    self.handle_channel_fee_update(update)
def test_pfs_broadcast_messages(
    local_matrix_servers,
    retry_interval_initial,
    retry_interval_max,
    retries_before_backoff,
    monkeypatch,
    broadcast_rooms,
    route_mode,
):
    """ Test that RaidenService broadcasts PFSCapacityUpdate messages to
    PATH_FINDING_BROADCASTING_ROOM room on newly received balance proofs.

    Also verifies that an explicitly broadcast PFSFeeUpdate reaches the
    same room.
    """
    transport = MatrixTransport(
        config=MatrixTransportConfig(
            broadcast_rooms=broadcast_rooms,
            retries_before_backoff=retries_before_backoff,
            retry_interval_initial=retry_interval_initial,
            retry_interval_max=retry_interval_max,
            server=local_matrix_servers[0],
            available_servers=[local_matrix_servers[0]],
        ),
        environment=Environment.DEVELOPMENT,
    )
    transport._client.api.retry_timeout = 0
    # Stub out the raw network send so nothing actually hits the wire.
    transport._send_raw = MagicMock()
    raiden_service = MockRaidenService(None)
    raiden_service.config.services.monitoring_enabled = True
    raiden_service.routing_mode = route_mode
    transport.start(raiden_service, [], None)

    pfs_room_name = make_room_alias(transport.chain_id, PATH_FINDING_BROADCASTING_ROOM)
    pfs_room = transport._broadcast_rooms.get(pfs_room_name)
    assert isinstance(pfs_room, Room)
    # Spy on outgoing room traffic instead of sending real Matrix events.
    pfs_room.send_text = MagicMock(spec=pfs_room.send_text)
    raiden_service.transport = transport
    transport.log = MagicMock()

    # send PFSCapacityUpdate
    balance_proof = factories.create(HOP1_BALANCE_PROOF)
    channel_state = factories.create(factories.NettingChannelStateProperties())
    channel_state.our_state.balance_proof = balance_proof
    channel_state.partner_state.balance_proof = balance_proof
    # Force the channel lookup used by send_pfs_update to return our
    # fabricated channel state regardless of the canonical identifier.
    monkeypatch.setattr(
        raiden.transfer.views,
        "get_channelstate_by_canonical_identifier",
        lambda *a, **kw: channel_state,
    )
    send_pfs_update(raiden=raiden_service, canonical_identifier=balance_proof.canonical_identifier)
    gevent.idle()
    # The send happens on a transport greenlet; spin (bounded by the
    # 2-second timeout) until exactly one room message has gone out.
    with gevent.Timeout(2):
        while pfs_room.send_text.call_count < 1:
            gevent.idle()
    assert pfs_room.send_text.call_count == 1

    # send PFSFeeUpdate
    channel_state = factories.create(factories.NettingChannelStateProperties())
    fee_update = PFSFeeUpdate.from_channel_state(channel_state)
    fee_update.sign(raiden_service.signer)
    raiden_service.transport.broadcast(PATH_FINDING_BROADCASTING_ROOM, fee_update)
    with gevent.Timeout(2):
        while pfs_room.send_text.call_count < 2:
            gevent.idle()
    assert pfs_room.send_text.call_count == 2
    # The last room payload must be the serialized fee update.
    msg_data = json.loads(pfs_room.send_text.call_args[0][0])
    assert msg_data["type"] == "PFSFeeUpdate"

    transport.stop()
    # Join the transport greenlet so any exception it raised fails the test.
    transport.greenlet.get()
def test_encoding_and_decoding():
    """Round-trip one instance of every message type through
    MessageSerializer and assert equality after deserialization.
    """
    # Message types that already have dedicated factory properties.
    message_factories = (
        factories.LockedTransferProperties(),
        factories.RefundTransferProperties(),
        factories.LockExpiredProperties(),
        factories.UnlockProperties(),
    )
    messages = [factories.create(factory) for factory in message_factories]

    # TODO Handle these with factories once #5091 is implemented
    messages.append(
        Delivered(
            delivered_message_identifier=factories.make_message_identifier(),
            signature=factories.make_signature(),
        ))
    messages.append(
        Processed(
            message_identifier=factories.make_message_identifier(),
            signature=factories.make_signature(),
        ))
    messages.append(
        RevealSecret(
            message_identifier=factories.make_message_identifier(),
            secret=factories.make_secret(),
            signature=factories.make_signature(),
        ))
    messages.append(
        SecretRequest(
            message_identifier=factories.make_message_identifier(),
            payment_identifier=factories.make_payment_id(),
            secrethash=factories.make_secret_hash(),
            amount=factories.make_token_amount(),
            expiration=factories.make_block_number(),
            signature=factories.make_signature(),
        ))
    messages.append(
        WithdrawRequest(
            message_identifier=factories.make_message_identifier(),
            chain_id=factories.make_chain_id(),
            token_network_address=factories.make_token_network_address(),
            channel_identifier=factories.make_channel_identifier(),
            participant=factories.make_address(),
            total_withdraw=factories.make_token_amount(),
            nonce=factories.make_nonce(),
            expiration=factories.make_block_number(),
            signature=factories.make_signature(),
        ))
    messages.append(
        WithdrawConfirmation(
            message_identifier=factories.make_message_identifier(),
            chain_id=factories.make_chain_id(),
            token_network_address=factories.make_token_network_address(),
            channel_identifier=factories.make_channel_identifier(),
            participant=factories.make_address(),
            total_withdraw=factories.make_token_amount(),
            nonce=factories.make_nonce(),
            expiration=factories.make_block_number(),
            signature=factories.make_signature(),
        ))
    messages.append(
        WithdrawExpired(
            message_identifier=factories.make_message_identifier(),
            chain_id=factories.make_chain_id(),
            token_network_address=factories.make_token_network_address(),
            channel_identifier=factories.make_channel_identifier(),
            participant=factories.make_address(),
            total_withdraw=factories.make_token_amount(),
            nonce=factories.make_nonce(),
            expiration=factories.make_block_number(),
            signature=factories.make_signature(),
        ))
    messages.append(
        PFSCapacityUpdate(
            canonical_identifier=factories.make_canonical_identifier(),
            updating_participant=factories.make_address(),
            other_participant=factories.make_address(),
            updating_nonce=factories.make_nonce(),
            other_nonce=factories.make_nonce(),
            updating_capacity=factories.make_token_amount(),
            other_capacity=factories.make_token_amount(),
            reveal_timeout=factories.make_uint64(),
            signature=factories.make_signature(),
        ))
    messages.append(
        PFSFeeUpdate(
            canonical_identifier=factories.make_canonical_identifier(),
            updating_participant=factories.make_address(),
            fee_schedule=factories.create(factories.FeeScheduleStateProperties()),
            # NOTE(review): naive local time — fine for a pure round-trip
            # check, assuming the serializer is timezone-agnostic; confirm.
            timestamp=datetime.now(),
            signature=factories.make_signature(),
        ))
    messages.append(
        RequestMonitoring(
            reward_amount=factories.make_token_amount(),
            balance_proof=SignedBlindedBalanceProof.from_balance_proof_signed_state(
                factories.create(factories.BalanceProofSignedStateProperties())),
            monitoring_service_contract_address=factories.make_address(),
            non_closing_participant=factories.make_address(),
            non_closing_signature=factories.make_signature(),
            signature=factories.make_signature(),
        ))

    # The actual round-trip assertion for every message built above.
    for message in messages:
        serialized = MessageSerializer.serialize(message)
        deserialized = MessageSerializer.deserialize(serialized)
        assert deserialized == message
def test_pfs_global_messages(
    local_matrix_servers,
    private_rooms,
    retry_interval,
    retries_before_backoff,
    monkeypatch,
    global_rooms,
    route_mode,
):
    """ Test that RaidenService sends PFSCapacityUpdate messages to global
    PATH_FINDING_BROADCASTING_ROOM room on newly received balance proofs.

    Also verifies that an explicitly sent PFSFeeUpdate reaches the same
    global room.
    """
    # Transport configured so its global-room list includes the PFS room.
    transport = MatrixTransport({
        "global_rooms": global_rooms + [PATH_FINDING_BROADCASTING_ROOM],
        "retries_before_backoff": retries_before_backoff,
        "retry_interval": retry_interval,
        "server": local_matrix_servers[0],
        "server_name": local_matrix_servers[0].netloc,
        "available_servers": [local_matrix_servers[0]],
        "private_rooms": private_rooms,
    })
    transport._client.api.retry_timeout = 0
    # Stub out the raw network send so nothing actually hits the wire.
    transport._send_raw = MagicMock()
    raiden_service = MockRaidenService(None)
    raiden_service.config = dict(services=dict(monitoring_enabled=True))
    raiden_service.routing_mode = route_mode
    transport.start(raiden_service, raiden_service.message_handler, None)

    pfs_room_name = make_room_alias(transport.chain_id, PATH_FINDING_BROADCASTING_ROOM)
    pfs_room = transport._global_rooms.get(pfs_room_name)
    assert isinstance(pfs_room, Room)
    # Spy on outgoing room traffic instead of sending real Matrix events.
    pfs_room.send_text = MagicMock(spec=pfs_room.send_text)
    raiden_service.transport = transport
    transport.log = MagicMock()

    # send PFSCapacityUpdate
    balance_proof = factories.create(HOP1_BALANCE_PROOF)
    channel_state = factories.create(factories.NettingChannelStateProperties())
    channel_state.our_state.balance_proof = balance_proof
    channel_state.partner_state.balance_proof = balance_proof
    # Force the channel lookup used by send_pfs_update to return our
    # fabricated channel state regardless of the canonical identifier.
    monkeypatch.setattr(
        raiden.transfer.views,
        "get_channelstate_by_canonical_identifier",
        lambda *a, **kw: channel_state,
    )
    send_pfs_update(raiden=raiden_service, canonical_identifier=balance_proof.canonical_identifier)
    gevent.idle()
    # The send happens asynchronously; spin (bounded by the 2-second
    # timeout) until exactly one room message has gone out.
    with gevent.Timeout(2):
        while pfs_room.send_text.call_count < 1:
            gevent.idle()
    assert pfs_room.send_text.call_count == 1

    # send PFSFeeUpdate
    channel_state = factories.create(factories.NettingChannelStateProperties())
    fee_update = PFSFeeUpdate.from_channel_state(channel_state)
    fee_update.sign(raiden_service.signer)
    raiden_service.transport.send_global(PATH_FINDING_BROADCASTING_ROOM, fee_update)
    with gevent.Timeout(2):
        while pfs_room.send_text.call_count < 2:
            gevent.idle()
    assert pfs_room.send_text.call_count == 2
    # The last room payload must be the serialized fee update.
    msg_data = json.loads(pfs_room.send_text.call_args[0][0])
    assert msg_data["type"] == "PFSFeeUpdate"

    transport.stop()
    # NOTE(review): a sibling test joins via transport.greenlet.get();
    # presumably transport.get() is the equivalent on this Runnable-based
    # transport — confirm against the MatrixTransport API in use here.
    transport.get()
canonical_identifier=factories.make_canonical_identifier(), updating_participant=factories.make_address(), other_participant=factories.make_address(), updating_nonce=factories.make_nonce(), other_nonce=factories.make_nonce(), updating_capacity=factories.make_token_amount(), other_capacity=factories.make_token_amount(), reveal_timeout=factories.make_block_timeout(), signature=factories.make_signature(), ) ) messages.append( PFSFeeUpdate( canonical_identifier=factories.make_canonical_identifier(), updating_participant=factories.make_address(), fee_schedule=factories.create(factories.FeeScheduleStateProperties()), timestamp=datetime(2000, 1, 1), signature=factories.make_signature(), ) ) messages.append( RequestMonitoring( reward_amount=factories.make_token_amount(), balance_proof=SignedBlindedBalanceProof.from_balance_proof_signed_state( factories.create(factories.BalanceProofSignedStateProperties()) ), monitoring_service_contract_address=factories.make_monitoring_service_address(), non_closing_participant=factories.make_address(), non_closing_signature=factories.make_signature(), signature=factories.make_signature(), )