def test_join_invalid_discovery(
    local_matrix_servers,
    retry_interval_initial,
    retry_interval_max,
    retries_before_backoff,
    broadcast_rooms,
):
    """join_broadcast_room tries to join on all servers on available_servers config

    If any of the servers isn't reachable by synapse, it'll return a 500 response, which needs
    to be handled, and if no discovery room is found on any of the available_servers, one in
    our current server should be created
    """
    transport = MatrixTransport(
        config=MatrixTransportConfig(
            broadcast_rooms=broadcast_rooms,
            retries_before_backoff=retries_before_backoff,
            retry_interval_initial=retry_interval_initial,
            retry_interval_max=retry_interval_max,
            server=local_matrix_servers[0],
            available_servers=["http://invalid.server"],
        ),
        environment=Environment.DEVELOPMENT,
    )
    transport._client.api.retry_timeout = 0
    transport._send_raw = MagicMock()
    raiden_service = MockRaidenService(None)

    transport.start(raiden_service, [], None)
    transport.log = MagicMock()
    discovery_room_name = make_room_alias(transport.chain_id, "discovery")
    assert isinstance(transport._broadcast_rooms.get(discovery_room_name), Room)

    transport.stop()
    transport.greenlet.get()
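
Note: throughout these examples the alias returned by make_room_alias is combined with a server netloc to form the fully qualified room name that clients actually join (see e.g. Example #9 and Example #24 below). A minimal, self-contained illustration, using a placeholder server URL and the alias value asserted in Example #29:

from urllib.parse import urlsplit

room_alias = "raiden_mainnet_discovery"  # what make_room_alias(1, "discovery") returns (Example #29)
server_netloc = urlsplit("http://localhost:8500").netloc  # placeholder server URL
room_name_full = f"#{room_alias}:{server_netloc}"  # "#raiden_mainnet_discovery:localhost:8500"
print(room_name_full)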
Example #2
def test_matrix_discovery_room_offline_server(local_matrix_servers,
                                              retries_before_backoff,
                                              retry_interval, private_rooms,
                                              global_rooms):

    transport = MatrixTransport({
        "global_rooms": global_rooms,
        "retries_before_backoff": retries_before_backoff,
        "retry_interval": retry_interval,
        "server": local_matrix_servers[0],
        "server_name": local_matrix_servers[0].netloc,
        "available_servers": [local_matrix_servers[0], "https://localhost:1"],
        "private_rooms": private_rooms,
    })
    transport.start(MockRaidenService(None), MessageHandler(set()), "")
    gevent.sleep(0.2)

    discovery_room_name = make_room_alias(transport.network_id, "discovery")
    assert isinstance(transport._global_rooms.get(discovery_room_name), Room)

    transport.stop()
    transport.get()
Example #3
def test_matrix_discovery_room_offline_server(
    local_matrix_servers,
    retries_before_backoff,
    retry_interval,
    private_rooms,
):

    transport = MatrixTransport({
        'global_rooms': ['discovery'],
        'retries_before_backoff': retries_before_backoff,
        'retry_interval': retry_interval,
        'server': local_matrix_servers[0],
        'server_name': local_matrix_servers[0].netloc,
        'available_servers': [local_matrix_servers[0], 'https://localhost:1'],
        'private_rooms': private_rooms,
    })
    transport.start(MockRaidenService(None), MessageHandler(set()), '')
    gevent.sleep(.2)

    discovery_room_name = make_room_alias(transport.network_id, 'discovery')
    assert isinstance(transport._global_rooms.get(discovery_room_name), Room)

    transport.stop()
    transport.get()
Example #4
    def __init__(
        self,
        private_key: PrivateKey,
        chain_id: ChainID,
        device_id: DeviceIDs,
        message_received_callback: Callable[[Message], None],
        servers: Optional[List[str]] = None,
    ) -> None:
        super().__init__()

        self.chain_id = chain_id
        self.device_id = device_id
        self.message_received_callback = message_received_callback
        self._displayname_cache = DisplayNameCache()
        self.startup_finished = AsyncResult()
        self._client_manager = ClientManager(
            available_servers=servers,
            device_id=self.device_id,
            broadcast_room_alias_prefix=make_room_alias(
                chain_id, DISCOVERY_DEFAULT_ROOM),
            chain_id=self.chain_id,
            private_key=private_key,
            handle_matrix_sync=self._handle_matrix_sync,
        )

        self.base_url = self._client.api.base_url
        self.user_manager = MultiClientUserAddressManager(
            client=self._client,
            displayname_cache=self._displayname_cache,
        )

        self._rate_limiter = RateLimiter(
            allowed_bytes=MATRIX_RATE_LIMIT_ALLOWED_BYTES,
            reset_interval=MATRIX_RATE_LIMIT_RESET_INTERVAL,
        )
Example #5
def local_matrix_servers_with_executor(
    request,
    transport_protocol,
    matrix_server_count,
    synapse_config_generator,
    port_generator,
    broadcast_rooms,
    chain_id,
) -> Iterable[List[Tuple[ParsedURL, HTTPExecutor]]]:
    if transport_protocol is not TransportProtocol.MATRIX:
        yield []
        return

    broadcast_rooms_aliases = [
        make_room_alias(chain_id, room_name) for room_name in broadcast_rooms
    ]

    starter = matrix_server_starter(
        free_port_generator=port_generator,
        broadcast_rooms_aliases=broadcast_rooms_aliases,
        count=matrix_server_count,
        config_generator=synapse_config_generator,
        log_context=request.node.name,
    )
    with starter as servers:
        yield servers
def test_matrix_discovery_room_offline_server(
    local_matrix_servers,
    retries_before_backoff,
    retry_interval_initial,
    retry_interval_max,
    broadcast_rooms,
):

    transport = MatrixTransport(
        config=MatrixTransportConfig(
            broadcast_rooms=broadcast_rooms,
            retries_before_backoff=retries_before_backoff,
            retry_interval_initial=retry_interval_initial,
            retry_interval_max=retry_interval_max,
            server=local_matrix_servers[0],
            available_servers=[local_matrix_servers[0], "https://localhost:1"],
        ),
        environment=Environment.DEVELOPMENT,
    )
    transport.start(MockRaidenService(None), [], "")

    discovery_room_name = make_room_alias(transport.chain_id, "discovery")
    with gevent.Timeout(1):
        while not isinstance(transport._broadcast_rooms.get(discovery_room_name), Room):
            gevent.sleep(0.1)

    transport.stop()
    transport.greenlet.get()
Example #7
def test_matrix_discovery_room_offline_server(local_matrix_servers,
                                              retries_before_backoff,
                                              retry_interval, broadcast_rooms):

    transport = MatrixTransport({
        "broadcast_rooms": broadcast_rooms,
        "retries_before_backoff": retries_before_backoff,
        "retry_interval": retry_interval,
        "server": local_matrix_servers[0],
        "server_name": local_matrix_servers[0].netloc,
        "available_servers": [local_matrix_servers[0], "https://localhost:1"],
    })
    transport.start(MockRaidenService(None), [], "")

    discovery_room_name = make_room_alias(transport.chain_id, "discovery")
    with gevent.Timeout(1):
        while not isinstance(
                transport._broadcast_rooms.get(discovery_room_name), Room):
            gevent.sleep(0.1)

    transport.stop()
    transport.greenlet.get()
Example #8
def test_join_invalid_discovery(local_matrix_servers, private_rooms,
                                retry_interval, retries_before_backoff,
                                global_rooms):
    """join_global_room tries to join on all servers on available_servers config

    If any of the servers isn't reachable by synapse, it'll return a 500 response, which needs
    to be handled, and if no discovery room is found on any of the available_servers, one in
    our current server should be created
    """
    transport = MatrixTransport({
        "global_rooms": global_rooms,
        "retries_before_backoff": retries_before_backoff,
        "retry_interval": retry_interval,
        "server": local_matrix_servers[0],
        "server_name": local_matrix_servers[0].netloc,
        "available_servers": ["http://invalid.server"],
        "private_rooms": private_rooms,
    })
    transport._client.api.retry_timeout = 0
    transport._send_raw = MagicMock()
    raiden_service = MockRaidenService(None)

    transport.start(raiden_service, raiden_service.message_handler, None)
    transport.log = MagicMock()
    discovery_room_name = make_room_alias(transport.network_id, "discovery")
    assert isinstance(transport._global_rooms.get(discovery_room_name), Room)

    transport.stop()
    transport.get()
Example #9
def test_assumption_search_user_directory_returns_federated_users(chain_id, local_matrix_servers):
    """The search_user_directory should return federated users.

    This assumption test was added because of issue #5285. The
    path-finding-service was not functioning properly because the call to
    `search_user_directory` did not return federated users, only local users.
    Because of that the PFS assumed the users were offline and didn't find any
    valid routes for the payments.
    """
    original_server_url = urlsplit(local_matrix_servers[0]).netloc

    room_alias = make_room_alias(chain_id, "broadcast_test")
    room_name_full = f"#{room_alias}:{original_server_url}"

    user_room_creator, _ = create_logged_in_client(local_matrix_servers[0])
    user_room_creator.create_room(room_alias, is_public=True)

    user_federated, _ = create_logged_in_client(local_matrix_servers[1])
    join_broadcast_room(user_federated, room_name_full)

    addresses = list()
    for _ in range(1000):
        user, signer = create_logged_in_client(local_matrix_servers[0])
        join_broadcast_room(user, room_name_full)

        # Make sure to close the session instance, otherwise there will be too
        # many file descriptors opened by the underlying urllib3 connection
        # pool.
        user.api.session.close()
        del user

        addresses.append(signer.address)

    for address in addresses:
        assert user_federated.search_user_directory(to_hex_address(address))
Example #10
def local_matrix_servers(
    request,
    transport_protocol,
    matrix_server_count,
    synapse_config_generator,
    port_generator,
    broadcast_rooms,
    chain_id,
):
    if transport_protocol is not TransportProtocol.MATRIX:
        yield [None]
        return

    broadcast_rooms_aliases = [
        make_room_alias(chain_id, room_name) for room_name in broadcast_rooms
    ]

    starter = matrix_server_starter(
        free_port_generator=port_generator,
        broadcast_rooms_aliases=broadcast_rooms_aliases,
        count=matrix_server_count,
        config_generator=synapse_config_generator,
        log_context=request.node.name,
    )
    with starter as server_urls:
        yield server_urls
Example #11
    def setup_matrix(self, service_room_suffix: str) -> Tuple[GMatrixClient, Room]:
        available_servers_url = DEFAULT_MATRIX_KNOWN_SERVERS[Environment.DEVELOPMENT]
        available_servers = get_matrix_servers(available_servers_url)

        def _http_retry_delay() -> Iterable[float]:
            # below constants are defined in raiden.app.App.DEFAULT_CONFIG
            return udp_utils.timeout_exponential_backoff(
                DEFAULT_TRANSPORT_RETRIES_BEFORE_BACKOFF,
                int(DEFAULT_TRANSPORT_MATRIX_RETRY_INTERVAL / 5),
                int(DEFAULT_TRANSPORT_MATRIX_RETRY_INTERVAL),
            )

        client = make_client(
            servers=available_servers,
            http_pool_maxsize=4,
            http_retry_timeout=40,
            http_retry_delay=_http_retry_delay,
        )

        try:
            login_or_register(client, signer=LocalSigner(private_key=decode_hex(self.private_key)))
        except (MatrixRequestError, ValueError):
            raise ConnectionError("Could not login/register to matrix.")

        try:
            room_name = make_room_alias(self.chain_id, service_room_suffix)
            monitoring_room = join_global_room(
                client=client, name=room_name, servers=available_servers
            )
        except (MatrixRequestError, TransportError):
            raise ConnectionError("Could not join monitoring broadcasting room.")

        return client, monitoring_room
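
For context, the _http_retry_delay helper above relies on an exponential backoff generator. Below is a generic sketch of that idea; it is not the actual raiden timeout_exponential_backoff implementation, just a plausible equivalent that yields the initial interval a fixed number of times before doubling it up to a maximum.

from typing import Iterator

def exponential_backoff(retries: int, initial: float, maximum: float) -> Iterator[float]:
    """Yield `initial` for the first `retries` attempts, then keep doubling up to `maximum`."""
    for _ in range(retries):
        yield initial
    delay = initial
    while True:
        delay = min(delay * 2, maximum)
        yield delay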
Example #12
def test_pfs_global_messages(
    local_matrix_servers,
    private_rooms,
    retry_interval,
    retries_before_backoff,
    monkeypatch,
    global_rooms,
):
    """
    Test that RaidenService sends UpdatePFS messages to the global
    PATH_FINDING_BROADCASTING_ROOM room on newly received balance proofs.
    """
    transport = MatrixTransport({
        "global_rooms": global_rooms,  # FIXME: #3735
        "retries_before_backoff": retries_before_backoff,
        "retry_interval": retry_interval,
        "server": local_matrix_servers[0],
        "server_name": local_matrix_servers[0].netloc,
        "available_servers": [local_matrix_servers[0]],
        "private_rooms": private_rooms,
    })
    transport._client.api.retry_timeout = 0
    transport._send_raw = MagicMock()
    raiden_service = MockRaidenService(None)
    raiden_service.config = dict(services=dict(monitoring_enabled=True))

    transport.start(raiden_service, raiden_service.message_handler, None)

    pfs_room_name = make_room_alias(transport.network_id,
                                    PATH_FINDING_BROADCASTING_ROOM)
    pfs_room = transport._global_rooms.get(pfs_room_name)
    assert isinstance(pfs_room, Room)
    pfs_room.send_text = MagicMock(spec=pfs_room.send_text)

    raiden_service.transport = transport
    transport.log = MagicMock()

    balance_proof = factories.create(HOP1_BALANCE_PROOF)
    channel_state = factories.create(factories.NettingChannelStateProperties())
    channel_state.our_state.balance_proof = balance_proof
    channel_state.partner_state.balance_proof = balance_proof
    monkeypatch.setattr(
        raiden.transfer.views,
        "get_channelstate_by_canonical_identifier",
        lambda *a, **kw: channel_state,
    )
    update_path_finding_service_from_balance_proof(
        raiden=raiden_service,
        chain_state=None,
        new_balance_proof=balance_proof)
    gevent.idle()

    with gevent.Timeout(2):
        while pfs_room.send_text.call_count < 1:
            gevent.idle()
    assert pfs_room.send_text.call_count == 1
    transport.stop()
    transport.get()
Example #13
def test_monitoring_global_messages(
    local_matrix_servers,
    private_rooms,
    retry_interval,
    retries_before_backoff,
):
    """
    Test that RaidenService sends RequestMonitoring messages to the global
    MONITORING_BROADCASTING_ROOM room on newly received balance proofs.
    """
    transport = MatrixTransport({
        'global_rooms': ['discovery', MONITORING_BROADCASTING_ROOM],
        'retries_before_backoff': retries_before_backoff,
        'retry_interval': retry_interval,
        'server': local_matrix_servers[0],
        'server_name': local_matrix_servers[0].netloc,
        'available_servers': [local_matrix_servers[0]],
        'private_rooms': private_rooms,
    })
    transport._client.api.retry_timeout = 0
    transport._send_raw = MagicMock()
    raiden_service = MockRaidenService(None)
    raiden_service.config = dict(services=dict(monitoring_enabled=True))

    transport.start(
        raiden_service,
        raiden_service.message_handler,
        None,
    )

    ms_room_name = make_room_alias(transport.network_id,
                                   MONITORING_BROADCASTING_ROOM)
    ms_room = transport._global_rooms.get(ms_room_name)
    assert isinstance(ms_room, Room)
    ms_room.send_text = MagicMock(spec=ms_room.send_text)

    raiden_service.transport = transport
    transport.log = MagicMock()
    balance_proof = make_balance_proof(signer=LocalSigner(HOP1_KEY), amount=1)
    update_monitoring_service_from_balance_proof(
        raiden_service,
        balance_proof,
    )
    gevent.idle()

    assert ms_room.send_text.call_count == 1
    transport.stop()
    transport.get()
Example #14
    def join_global_rooms(
        self, client: GMatrixClient,
        available_servers: Sequence[str] = ()) -> None:
        """Join or create a global public room with given name on all available servers.
        If global rooms are not found, create a public room with the name on each server.

        Params:
            client: matrix-python-sdk client instance
            servers: optional: sequence of known/available servers to try to find the room in
        """
        suffix = self.service_room_suffix
        room_alias_prefix = make_room_alias(self.chain_id, suffix)

        parsed_servers = [
            urlparse(s).netloc for s in available_servers
            if urlparse(s).netloc not in {None, ""}
        ]

        for server in parsed_servers:
            room_alias_full = f"#{room_alias_prefix}:{server}"
            log.debug(f"Trying to join {suffix} room",
                      room_alias_full=room_alias_full)
            try:
                broadcast_room = client.join_room(room_alias_full)
                log.debug(f"Joined {suffix} room", room=broadcast_room)
                self.broadcast_rooms.append(broadcast_room)
            except MatrixRequestError as ex:
                if ex.code != 404:
                    log.debug(
                        f"Could not join {suffix} room, trying to create one",
                        room_alias_full=room_alias_full,
                    )
                    try:
                        broadcast_room = client.create_room(room_alias_full,
                                                            is_public=True)
                        log.debug(f"Created {suffix} room",
                                  room=broadcast_room)
                        self.broadcast_rooms.append(broadcast_room)
                    except MatrixRequestError:
                        log.debug(
                            f"Could neither join nor create a {suffix} room",
                            room_alias_full=room_alias_full,
                        )
                        raise TransportError(
                            f"Could neither join nor create a {suffix} room")

                else:
                    log.debug(
                        f"Could not join {suffix} room",
                        room_alias_full=room_alias_full,
                        _exception=ex,
                    )
                    raise
Example #15
def test_matrix_send_global(
    local_matrix_servers,
    retries_before_backoff,
    retry_interval,
    private_rooms,
):
    transport = MatrixTransport({
        'global_rooms': ['discovery', MONITORING_BROADCASTING_ROOM],
        'retries_before_backoff': retries_before_backoff,
        'retry_interval': retry_interval,
        'server': local_matrix_servers[0],
        'server_name': local_matrix_servers[0].netloc,
        'available_servers': [local_matrix_servers[0]],
        'private_rooms': private_rooms,
    })
    transport.start(MockRaidenService(None), MessageHandler(set()), '')
    gevent.idle()

    ms_room_name = make_room_alias(transport.network_id,
                                   MONITORING_BROADCASTING_ROOM)
    ms_room = transport._global_rooms.get(ms_room_name)
    assert isinstance(ms_room, Room)

    ms_room.send_text = MagicMock(spec=ms_room.send_text)

    for i in range(5):
        message = Processed(message_identifier=i)
        transport._raiden_service.sign(message)
        transport.send_global(
            MONITORING_BROADCASTING_ROOM,
            message,
        )
    transport._spawn(transport._global_send_worker)

    gevent.idle()

    assert ms_room.send_text.call_count >= 1
    # messages could have been bundled
    call_args_str = ' '.join(
        str(arg) for arg in ms_room.send_text.call_args_list)
    for i in range(5):
        assert f'"message_identifier": {i}' in call_args_str

    transport.stop()
    transport.get()
Example #16
def monitor_server_presence(server: str, signer: Signer,
                            network_names: List[str], stop_event: Event):
    server_name = urlparse(server).netloc
    client = make_client(lambda x: False, lambda x: None, [server])
    login(client=client, signer=signer)
    client.add_presence_listener(partial(log_presence, server))
    client.start_listener_thread(30_000, 1_000)
    for network_name in network_names:
        discovery_room_alias = make_room_alias(CHAINNAME_TO_ID[network_name],
                                               DISCOVERY_DEFAULT_ROOM)

        discovery_room = join_broadcast_room(
            client, f"#{discovery_room_alias}:{server_name}")
    log.info("Monitoring started", server=server)
    stop_event.wait()
    client.stop()
Example #17
def test_matrix_broadcast(
    local_matrix_servers,
    retries_before_backoff,
    retry_interval_initial,
    retry_interval_max,
    broadcast_rooms,
):
    transport = MatrixTransport(
        config=MatrixTransportConfig(
            broadcast_rooms=broadcast_rooms,
            retries_before_backoff=retries_before_backoff,
            retry_interval_initial=retry_interval_initial,
            retry_interval_max=retry_interval_max,
            server=local_matrix_servers[0],
            available_servers=[local_matrix_servers[0]],
        ),
        environment=Environment.DEVELOPMENT,
    )
    transport.start(MockRaidenService(None), [], "")
    gevent.idle()

    ms_room_name = make_room_alias(transport.chain_id,
                                   MONITORING_BROADCASTING_ROOM)
    ms_room = transport._broadcast_rooms.get(ms_room_name)
    assert isinstance(ms_room, Room)

    ms_room.send_text = MagicMock(spec=ms_room.send_text)

    for i in range(5):
        message = Processed(message_identifier=i, signature=EMPTY_SIGNATURE)
        transport._raiden_service.sign(message)
        transport.broadcast(MONITORING_BROADCASTING_ROOM, message)
    transport._schedule_new_greenlet(transport._broadcast_worker)

    gevent.idle()

    assert ms_room.send_text.call_count >= 1
    # messages could have been bundled
    call_args_str = " ".join(
        str(arg) for arg in ms_room.send_text.call_args_list)
    for i in range(5):
        assert f'"message_identifier": "{i}"' in call_args_str

    transport.stop()
    transport.greenlet.get()
Example #18
def test_matrix_send_global(
    local_matrix_servers,
    retries_before_backoff,
    retry_interval,
    private_rooms,
):
    transport = MatrixTransport({
        'global_rooms': ['discovery', 'monitoring'],
        'retries_before_backoff': retries_before_backoff,
        'retry_interval': retry_interval,
        'server': local_matrix_servers[0],
        'server_name': local_matrix_servers[0].netloc,
        'available_servers': [local_matrix_servers[0]],
        'private_rooms': private_rooms,
    })
    transport.start(MockRaidenService(None), MessageHandler(set()), '')
    gevent.idle()

    ms_room_name = make_room_alias(transport.network_id, 'monitoring')
    ms_room = transport._global_rooms.get(ms_room_name)
    assert isinstance(ms_room, Room)

    ms_room.send_text = MagicMock(spec=ms_room.send_text)

    for i in range(5):
        message = Processed(i)
        transport._raiden_service.sign(message)
        transport.send_global(
            'monitoring',
            message,
        )

    gevent.idle()

    assert ms_room.send_text.call_count == 5

    # unknown room suffix is an error
    with pytest.raises(AssertionError):
        transport.send_global(
            'unknown_suffix',
            Processed(10),
        )

    transport.stop()
    transport.get()
Example #19
def test_matrix_send_global(
    local_matrix_servers,
    retries_before_backoff,
    retry_interval,
    private_rooms,
):
    transport = MatrixTransport({
        'global_rooms': ['discovery', MONITORING_BROADCASTING_ROOM],
        'retries_before_backoff': retries_before_backoff,
        'retry_interval': retry_interval,
        'server': local_matrix_servers[0],
        'server_name': local_matrix_servers[0].netloc,
        'available_servers': [local_matrix_servers[0]],
        'private_rooms': private_rooms,
    })
    transport.start(MockRaidenService(None), MessageHandler(set()), '')
    gevent.idle()

    ms_room_name = make_room_alias(transport.network_id,
                                   MONITORING_BROADCASTING_ROOM)
    ms_room = transport._global_rooms.get(ms_room_name)
    assert isinstance(ms_room, Room)

    ms_room.send_text = MagicMock(spec=ms_room.send_text)

    for i in range(5):
        message = Processed(i)
        transport._raiden_service.sign(message)
        transport.send_global(
            MONITORING_BROADCASTING_ROOM,
            message,
        )

    gevent.idle()

    assert ms_room.send_text.call_count == 5

    transport.stop()
    transport.get()
Example #20
def test_matrix_send_global(local_matrix_servers, retries_before_backoff,
                            retry_interval, private_rooms, global_rooms):
    transport = MatrixTransport({
        "global_rooms": global_rooms + [MONITORING_BROADCASTING_ROOM],
        "retries_before_backoff": retries_before_backoff,
        "retry_interval": retry_interval,
        "server": local_matrix_servers[0],
        "server_name": local_matrix_servers[0].netloc,
        "available_servers": [local_matrix_servers[0]],
        "private_rooms": private_rooms,
    })
    transport.start(MockRaidenService(None), MessageHandler(set()), "")
    gevent.idle()

    ms_room_name = make_room_alias(transport.chain_id,
                                   MONITORING_BROADCASTING_ROOM)
    ms_room = transport._global_rooms.get(ms_room_name)
    assert isinstance(ms_room, Room)

    ms_room.send_text = MagicMock(spec=ms_room.send_text)

    for i in range(5):
        message = Processed(message_identifier=i, signature=EMPTY_SIGNATURE)
        transport._raiden_service.sign(message)
        transport.send_global(MONITORING_BROADCASTING_ROOM, message)
    transport._schedule_new_greenlet(transport._global_send_worker)

    gevent.idle()

    assert ms_room.send_text.call_count >= 1
    # messages could have been bundled
    call_args_str = " ".join(
        str(arg) for arg in ms_room.send_text.call_args_list)
    for i in range(5):
        assert f'"message_identifier": "{i}"' in call_args_str

    transport.stop()
    transport.get()
Example #21
def test_assumption_matrix_returns_same_id_for_same_filter_payload(chain_id, local_matrix_servers):
    """
    Test that for a duplicate filter payload, the matrix server will just
    return the existing filter ID rather than creating a new filter and returning
    a new ID. This means that no cleanup for previously created filters
    is required as filters are re-used.
    """
    client, _ = create_logged_in_client(local_matrix_servers[0])

    room_alias = make_room_alias(chain_id, "broadcast_test")

    broadcast_room = client.create_room(room_alias, is_public=True)

    assert client._sync_filter_id is None

    first_sync_filter_id = client.create_sync_filter(not_rooms=[broadcast_room])

    # Try again and make sure the filter has the same ID
    second_sync_filter_id = client.create_sync_filter(not_rooms=[broadcast_room])
    assert first_sync_filter_id == second_sync_filter_id
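
The assumption above can also be exercised against the plain Matrix client-server API. The sketch below uploads a filter with a room.not_rooms entry via the standard filter endpoint; the server URL, user ID, and access token are placeholders, and the deduplication of identical payloads is exactly what the test asserts.

import requests

def upload_sync_filter(server_url: str, user_id: str, access_token: str, room_id: str) -> str:
    # Exclude the broadcast room from /sync responses.
    filter_payload = {"room": {"not_rooms": [room_id]}}
    response = requests.post(
        f"{server_url}/_matrix/client/r0/user/{user_id}/filter",
        headers={"Authorization": f"Bearer {access_token}"},
        json=filter_payload,
    )
    response.raise_for_status()
    return response.json()["filter_id"]

# Uploading the identical payload twice is expected to return the same filter_id,
# which is the behaviour test_assumption_matrix_returns_same_id_for_same_filter_payload checks.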
Example #22
    def _start_client(self) -> None:
        try:
            login_or_register(
                self.client,
                signer=LocalSigner(private_key=decode_hex(self.private_key)))
        except (MatrixRequestError, ValueError):
            raise ConnectionError("Could not login/register to matrix.")

        try:
            room_name = make_room_alias(self.chain_id,
                                        self.service_room_suffix)
            self.broadcast_room = join_global_room(
                client=self.client,
                name=room_name,
                servers=self.available_servers)
        except (MatrixRequestError, TransportError):
            raise ConnectionError(
                "Could not join monitoring broadcasting room.")

        self.broadcast_room.add_listener(self._handle_message,
                                         "m.room.message")

        # Signal that startup is finished
        self.startup_finished.set()
Example #23
def smoketest(
    ctx: Context, debug: bool, eth_client: EthClient, report_path: Optional[str]
) -> None:
    """ Test, that the raiden installation is sane. """
    from raiden.tests.utils.smoketest import (
        setup_raiden,
        run_smoketest,
        setup_matrix_for_smoketest,
        setup_testchain_for_smoketest,
    )
    from raiden.tests.utils.transport import make_requests_insecure, ParsedURL

    step_count = 8
    step = 0
    stdout = sys.stdout
    raiden_stdout = StringIO()

    assert ctx.parent, MYPY_ANNOTATION
    environment_type = ctx.parent.params["environment_type"]
    transport = ctx.parent.params["transport"]
    disable_debug_logfile = ctx.parent.params["disable_debug_logfile"]
    matrix_server = ctx.parent.params["matrix_server"]

    if transport != "matrix":
        raise RuntimeError(f"Invalid transport type '{transport}'")

    if report_path is None:
        report_file = mktemp(suffix=".log")
    else:
        report_file = report_path

    make_requests_insecure()
    urllib3.disable_warnings(InsecureRequestWarning)

    click.secho(f"Report file: {report_file}", fg="yellow")

    configure_logging(
        logger_level_config={"": "DEBUG"},
        log_file=report_file,
        disable_debug_logfile=disable_debug_logfile,
    )

    def append_report(subject: str, data: Optional[AnyStr] = None) -> None:
        with open(report_file, "a", encoding="UTF-8") as handler:
            handler.write(f'{f" {subject.upper()} ":=^80}{os.linesep}')
            if data is not None:
                write_data: str
                if isinstance(data, bytes):
                    write_data = data.decode()
                else:
                    write_data = data
                handler.writelines([write_data + os.linesep])

    append_report("Raiden version", json.dumps(get_system_spec()))
    append_report("Raiden log")

    def print_step(description: str, error: bool = False) -> None:
        nonlocal step
        step += 1
        click.echo(
            "{} {}".format(
                click.style(f"[{step}/{step_count}]", fg="blue"),
                click.style(description, fg="green" if not error else "red"),
            ),
            file=stdout,
        )

    contracts_version = RAIDEN_CONTRACT_VERSION

    try:
        free_port_generator = get_free_port()
        ethereum_nodes = None

        datadir = mkdtemp()
        testchain_manager: ContextManager[Dict[str, Any]] = setup_testchain_for_smoketest(
            eth_client=eth_client,
            print_step=print_step,
            free_port_generator=free_port_generator,
            base_datadir=datadir,
            base_logdir=datadir,
        )
        matrix_manager: ContextManager[
            List[Tuple[ParsedURL, HTTPExecutor]]
        ] = setup_matrix_for_smoketest(
            print_step=print_step,
            free_port_generator=free_port_generator,
            broadcast_rooms_aliases=[
                make_room_alias(NETWORKNAME_TO_ID["smoketest"], DISCOVERY_DEFAULT_ROOM),
                make_room_alias(NETWORKNAME_TO_ID["smoketest"], PATH_FINDING_BROADCASTING_ROOM),
            ],
        )

        # Do not redirect the stdout on a debug session, otherwise the REPL
        # will also be redirected
        if debug:
            stdout_manager = contextlib.nullcontext()
        else:
            stdout_manager = contextlib.redirect_stdout(raiden_stdout)

        with stdout_manager, testchain_manager as testchain, matrix_manager as server_urls:
            result = setup_raiden(
                transport=transport,
                matrix_server=matrix_server,
                print_step=print_step,
                contracts_version=contracts_version,
                eth_client=testchain["eth_client"],
                eth_rpc_endpoint=testchain["eth_rpc_endpoint"],
                web3=testchain["web3"],
                base_datadir=testchain["base_datadir"],
                keystore=testchain["keystore"],
            )

            args = result["args"]
            contract_addresses = result["contract_addresses"]
            ethereum_nodes = testchain["node_executors"]
            token = result["token"]

            port = next(free_port_generator)

            args["api_address"] = f"localhost:{port}"
            args["environment_type"] = environment_type

            # Matrix server
            # TODO: do we need more than one here?
            first_server = server_urls[0]
            args["matrix_server"] = first_server[0]
            args["one_to_n_contract_address"] = "0x" + "1" * 40
            args["routing_mode"] = RoutingMode.LOCAL
            args["flat_fee"] = ()
            args["proportional_fee"] = ()
            args["proportional_imbalance_fee"] = ()

            for option_ in run.params:
                if option_.name in args.keys():
                    args[option_.name] = option_.process_value(ctx, args[option_.name])
                else:
                    args[option_.name] = option_.default

            try:
                run_smoketest(
                    print_step=print_step,
                    args=args,
                    contract_addresses=contract_addresses,
                    token=token,
                )
            finally:
                if ethereum_nodes:
                    for node_executor in ethereum_nodes:
                        node = node_executor.process
                        node.send_signal(signal.SIGINT)

                        try:
                            node.wait(10)
                        except TimeoutExpired:
                            print_step("Ethereum node shutdown unclean, check log!", error=True)
                            node.kill()

                        if isinstance(node_executor.stdio, tuple):
                            logfile = node_executor.stdio[1]
                            logfile.flush()
                            logfile.seek(0)
                            append_report("Ethereum Node log output", logfile.read())

        append_report("Raiden Node stdout", raiden_stdout.getvalue())

    except:  # noqa pylint: disable=bare-except
        if debug:
            import pdb

            pdb.post_mortem()  # pylint: disable=no-member

        error = traceback.format_exc()
        append_report("Smoketest execution error", error)
        print_step("Smoketest execution error", error=True)
        success = False
    else:
        print_step(f"Smoketest successful")
        success = True

    if not success:
        sys.exit(1)
Example #24
def test_admin_is_allowed_to_kick(matrix_transports, local_matrix_servers):
    server_name = local_matrix_servers[0].netloc
    admin_credentials = get_admin_credentials(server_name)
    broadcast_room_name = make_room_alias(UNIT_CHAIN_ID, "discovery")
    broadcast_room_alias = f"#{broadcast_room_name}:{server_name}"

    transport0, transport1, transport2 = matrix_transports

    raiden_service0 = MockRaidenService()
    raiden_service1 = MockRaidenService()
    # start transports to join broadcast rooms as normal users
    transport0.start(raiden_service0, [], None)
    transport1.start(raiden_service1, [], None)
    # admin login using raiden.tests.utils.transport.AdminAuthProvider
    admin_client = GMatrixClient(ignore_messages, ignore_member_join,
                                 local_matrix_servers[0])
    admin_client.login(admin_credentials["username"],
                       admin_credentials["password"],
                       sync=False)
    room_id = admin_client.join_room(broadcast_room_alias).room_id

    # get members of room and filter not kickable users (power level 100)
    def _get_joined_room_members():
        membership_events = admin_client.api.get_room_members(room_id)["chunk"]
        member_ids = [
            event["state_key"] for event in membership_events
            if event["content"]["membership"] == "join"
        ]
        return set(member_ids)

    members = _get_joined_room_members()
    power_levels_event = admin_client.api.get_power_levels(room_id)
    admin_user_ids = [
        key for key, value in power_levels_event["users"].items()
        if value >= 50
    ]
    non_admin_user_ids = [
        member for member in members if member not in admin_user_ids
    ]
    # transport0 and transport1 should still be in non_admin_user_ids
    assert len(non_admin_user_ids) > 1
    kick_user_id = non_admin_user_ids[0]

    # kick one user
    admin_client.api.kick_user(room_id, kick_user_id)

    # Assert missing member
    members_after_kick = _get_joined_room_members()
    assert len(members_after_kick) == len(members) - 1
    members_after_kick.add(kick_user_id)
    assert members_after_kick == members

    # check assumption that new user does not receive presence
    raiden_service2 = MockRaidenService()

    def local_presence_listener(event, event_id):  # pylint: disable=unused-argument
        assert event["sender"] != kick_user_id

    transport2._client.add_presence_listener(local_presence_listener)
    transport2.start(raiden_service2, [], None)

    transport2.stop()

    # rejoin and assert that normal user cannot kick
    kicked_transport = transport0 if transport0._user_id == kick_user_id else transport1
    kicked_transport._client.join_room(broadcast_room_alias)

    with pytest.raises(MatrixRequestError):
        kicked_transport._client.api.kick_user(room_id, non_admin_user_ids[1])
Example #25
def test_assumption_federation_works_after_original_server_goes_down(
        chain_id, local_matrix_servers_with_executor):
    """ Check that a federated broadcast room keeps working after the original server goes down.

    This creates a federation of three matrix servers and a client for each.
    It then checks that all nodes receive messages from the broadcast room.
    Then the first matrix server is shut down and a second message is sent to
    the broadcast room, which should arrive at both remaining clients.
    """
    original_server_url = urlsplit(
        local_matrix_servers_with_executor[0][0]).netloc

    room_alias = make_room_alias(chain_id, "broadcast_test")
    room_name_full = f"#{room_alias}:{original_server_url}"

    user_room_creator, _ = create_logged_in_client(
        local_matrix_servers_with_executor[0][0])
    original_room: Room = user_room_creator.create_room(room_alias,
                                                        is_public=True)
    user_room_creator.start_listener_thread(
        timeout_ms=DEFAULT_TRANSPORT_MATRIX_SYNC_TIMEOUT,
        latency_ms=DEFAULT_TRANSPORT_MATRIX_SYNC_LATENCY,
    )

    user_federated_1, _ = create_logged_in_client(
        local_matrix_servers_with_executor[1][0])
    room_server1 = join_broadcast_room(user_federated_1, room_name_full)
    user_federated_1.rooms[room_server1.room_id] = room_server1
    user_federated_1.start_listener_thread(
        timeout_ms=DEFAULT_TRANSPORT_MATRIX_SYNC_TIMEOUT,
        latency_ms=DEFAULT_TRANSPORT_MATRIX_SYNC_LATENCY,
    )

    user_federated_2, _ = create_logged_in_client(
        local_matrix_servers_with_executor[2][0])
    room_server2 = join_broadcast_room(user_federated_2, room_name_full)
    user_federated_2.rooms[room_server2.room_id] = room_server2
    user_federated_2.start_listener_thread(
        timeout_ms=DEFAULT_TRANSPORT_MATRIX_SYNC_TIMEOUT,
        latency_ms=DEFAULT_TRANSPORT_MATRIX_SYNC_LATENCY,
    )

    received = {}

    def handle_message(node_id: int, _room: Room, event: Dict[str, Any]):
        nonlocal received
        received[node_id] = event["content"]["body"]

    original_room.add_listener(partial(handle_message, 0), "m.room.message")
    room_server1.add_listener(partial(handle_message, 1), "m.room.message")
    room_server2.add_listener(partial(handle_message, 2), "m.room.message")

    # Full federation, send a message to check it works
    original_room.send_text("Message1")

    while not len(received) == 3:
        gevent.sleep(0.1)

    assert sorted(received.keys()) == [0, 1, 2]
    assert all("Message1" == m for m in received.values())

    # Shut down the room_creator before we stop the server
    user_room_creator.stop_listener_thread()
    # Shutdown server 0, the original creator of the room
    server: HTTPExecutor = local_matrix_servers_with_executor[0][1]
    server.stop()

    # Send message from client 1, check that client 2 receives it
    received = {}
    room_server1.send_text("Message2")

    while not len(received) == 2:
        gevent.sleep(0.1)

    assert sorted(received.keys()) == [1, 2]
    assert all("Message2" == m for m in received.values())

    # Shut down longrunning threads
    user_federated_1.stop_listener_thread()
    user_federated_2.stop_listener_thread()
def test_pfs_broadcast_messages(
    local_matrix_servers,
    retry_interval_initial,
    retry_interval_max,
    retries_before_backoff,
    monkeypatch,
    broadcast_rooms,
    route_mode,
):
    """
    Test that RaidenService broadcasts PFSCapacityUpdate messages to the
    PATH_FINDING_BROADCASTING_ROOM room on newly received balance proofs.
    """
    transport = MatrixTransport(
        config=MatrixTransportConfig(
            broadcast_rooms=broadcast_rooms,
            retries_before_backoff=retries_before_backoff,
            retry_interval_initial=retry_interval_initial,
            retry_interval_max=retry_interval_max,
            server=local_matrix_servers[0],
            available_servers=[local_matrix_servers[0]],
        ),
        environment=Environment.DEVELOPMENT,
    )
    transport._client.api.retry_timeout = 0
    transport._send_raw = MagicMock()
    raiden_service = MockRaidenService(None)
    raiden_service.config.services.monitoring_enabled = True
    raiden_service.routing_mode = route_mode

    transport.start(raiden_service, [], None)

    pfs_room_name = make_room_alias(transport.chain_id, PATH_FINDING_BROADCASTING_ROOM)
    pfs_room = transport._broadcast_rooms.get(pfs_room_name)
    assert isinstance(pfs_room, Room)
    pfs_room.send_text = MagicMock(spec=pfs_room.send_text)

    raiden_service.transport = transport
    transport.log = MagicMock()

    # send PFSCapacityUpdate
    balance_proof = factories.create(HOP1_BALANCE_PROOF)
    channel_state = factories.create(factories.NettingChannelStateProperties())
    channel_state.our_state.balance_proof = balance_proof
    channel_state.partner_state.balance_proof = balance_proof
    monkeypatch.setattr(
        raiden.transfer.views,
        "get_channelstate_by_canonical_identifier",
        lambda *a, **kw: channel_state,
    )
    send_pfs_update(raiden=raiden_service, canonical_identifier=balance_proof.canonical_identifier)
    gevent.idle()
    with gevent.Timeout(2):
        while pfs_room.send_text.call_count < 1:
            gevent.idle()
    assert pfs_room.send_text.call_count == 1

    # send PFSFeeUpdate
    channel_state = factories.create(factories.NettingChannelStateProperties())
    fee_update = PFSFeeUpdate.from_channel_state(channel_state)
    fee_update.sign(raiden_service.signer)
    raiden_service.transport.broadcast(PATH_FINDING_BROADCASTING_ROOM, fee_update)
    with gevent.Timeout(2):
        while pfs_room.send_text.call_count < 2:
            gevent.idle()
    assert pfs_room.send_text.call_count == 2
    msg_data = json.loads(pfs_room.send_text.call_args[0][0])
    assert msg_data["type"] == "PFSFeeUpdate"

    transport.stop()
    transport.greenlet.get()
def test_monitoring_broadcast_messages(
    local_matrix_servers,
    retry_interval_initial,
    retry_interval_max,
    retries_before_backoff,
    monkeypatch,
    broadcast_rooms,
):
    """
    Test that RaidenService broadcasts RequestMonitoring messages to the
    MONITORING_BROADCASTING_ROOM room on newly received balance proofs.
    """
    transport = MatrixTransport(
        config=MatrixTransportConfig(
            broadcast_rooms=broadcast_rooms + [MONITORING_BROADCASTING_ROOM],
            retries_before_backoff=retries_before_backoff,
            retry_interval_initial=retry_interval_initial,
            retry_interval_max=retry_interval_max,
            server=local_matrix_servers[0],
            available_servers=[local_matrix_servers[0]],
        ),
        environment=Environment.DEVELOPMENT,
    )
    transport._client.api.retry_timeout = 0
    transport._send_raw = MagicMock()
    raiden_service = MockRaidenService(None)
    raiden_service.config = RaidenConfig(
        chain_id=1234,
        environment_type=Environment.DEVELOPMENT,
        services=ServiceConfig(monitoring_enabled=True),
    )

    transport.start(raiden_service, [], None)

    ms_room_name = make_room_alias(transport.chain_id, MONITORING_BROADCASTING_ROOM)
    ms_room = transport._broadcast_rooms.get(ms_room_name)
    assert isinstance(ms_room, Room)
    ms_room.send_text = MagicMock(spec=ms_room.send_text)

    raiden_service.transport = transport
    transport.log = MagicMock()

    balance_proof = factories.create(HOP1_BALANCE_PROOF)
    channel_state = factories.create(factories.NettingChannelStateProperties())
    channel_state.our_state.balance_proof = balance_proof
    channel_state.partner_state.balance_proof = balance_proof
    monkeypatch.setattr(
        raiden.transfer.views,
        "get_channelstate_by_canonical_identifier",
        lambda *a, **kw: channel_state,
    )
    monkeypatch.setattr(raiden.transfer.channel, "get_balance", lambda *a, **kw: 123)
    raiden_service.user_deposit.effective_balance.return_value = MONITORING_REWARD

    update_monitoring_service_from_balance_proof(
        raiden=raiden_service,
        chain_state=None,
        new_balance_proof=balance_proof,
        non_closing_participant=HOP1,
    )
    gevent.idle()

    with gevent.Timeout(2):
        while ms_room.send_text.call_count < 1:
            gevent.idle()
    assert ms_room.send_text.call_count == 1

    transport.stop()
    transport.greenlet.get()
def test_transport_does_not_receive_broadcast_rooms_updates(matrix_transports):
    """ Ensure that matrix server-side filters take effect on sync for broadcast room content.

    The test sets up 3 transports where:
    Transport0 sends a message to the PFS broadcast room.
    Transport1 has an active sync filter ID that filters out broadcast room messages.
    Transport2 has NO active sync filter so it receives everything.

    The test should wait for Transport0 to send a message and then
    verify that Transport2 has received the message while Transport1
    did not.
    """
    raiden_service0 = MockRaidenService(None)
    raiden_service1 = MockRaidenService(None)
    raiden_service2 = MockRaidenService(None)

    transport0, transport1, transport2 = matrix_transports

    received_sync_events: Dict[str, List[Dict[str, Any]]] = {"t1": [], "t2": []}

    def _handle_responses(
        name: str, responses: List[Dict[str, Any]], first_sync: bool = False
    ):  # pylint: disable=unused-argument
        for response in responses:
            joined_rooms = response.get("rooms", {}).get("join", {})
            for joined_room in joined_rooms.values():
                timeline_events = joined_room.get("timeline").get("events", [])
                message_events = [
                    event for event in timeline_events if event["type"] == "m.room.message"
                ]
                received_sync_events[name].extend(message_events)

    # Replace the transport's handle_response method
    # Should be able to detect if sync delivered a message
    transport1._client._handle_responses = partial(_handle_responses, "t1")
    transport2._client._handle_responses = partial(_handle_responses, "t2")

    transport0.start(raiden_service0, [], None)
    transport1.start(raiden_service1, [], None)
    transport2.start(raiden_service2, [], None)

    pfs_broadcast_room_alias = make_room_alias(transport0.chain_id, PATH_FINDING_BROADCASTING_ROOM)
    pfs_broadcast_room_t0 = transport0._broadcast_rooms[pfs_broadcast_room_alias]

    # Get the sync helper to control flow of asynchronous syncs
    sync_progress1 = transport1._client.sync_progress
    sync_progress2 = transport2._client.sync_progress

    # Reset transport2 sync filter identifier so that
    # we can receive broadcast messages
    assert transport2._client._sync_filter_id is not None
    transport2._client._sync_filter_id = None

    # get the last sync tokens to control the processed state later
    last_synced_token1 = sync_progress1.last_synced
    # for T2 we need to make sure that the current sync used the filter reset -> wait()
    last_synced_token2 = sync_progress2.synced_event.wait()[0]
    # Send another message to the broadcast room; if transport1 still receives broadcast room
    # content, the message will end up in received_sync_events["t1"] and the final assert fails
    message = Processed(message_identifier=1, signature=EMPTY_SIGNATURE)
    message_text = MessageSerializer.serialize(message)
    pfs_broadcast_room_t0.send_text(message_text)

    # wait for the current tokens to be processed + 1 additional sync
    # this must be done because the message should be in the sync after the stored token
    sync_progress1.wait_for_processed(last_synced_token1, 1)
    sync_progress2.wait_for_processed(last_synced_token2, 1)

    # Transport2 should have received the message
    assert received_sync_events["t2"]
    event_body = received_sync_events["t2"][0]["content"]["body"]
    assert message_text == event_body

    # Transport1 used the filter so nothing was received
    assert not received_sync_events["t1"]
Example #29
def test_make_room_alias():
    assert make_room_alias(1, "discovery") == "raiden_mainnet_discovery"
    assert make_room_alias(3, "0xdeadbeef", "0xabbacada") == "raiden_ropsten_0xdeadbeef_0xabbacada"
    assert make_room_alias(1337, "monitoring") == "raiden_1337_monitoring"
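
Based on the assertions above, a minimal sketch of what make_room_alias could look like is given below; the chain-id-to-name mapping and the function body are assumptions consistent with Example #29's expected outputs, not the actual Raiden implementation.

CHAIN_ID_TO_NAME = {1: "mainnet", 3: "ropsten"}  # assumed mapping; unknown ids fall back to the number

def make_room_alias(chain_id, *suffixes):
    """Return an alias such as 'raiden_mainnet_discovery' or 'raiden_1337_monitoring'."""
    network_name = CHAIN_ID_TO_NAME.get(chain_id, str(chain_id))
    return "_".join(["raiden", network_name, *suffixes])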
Example #30
def test_pfs_global_messages(
    matrix_transports,
    monkeypatch,
):
    """
    Test that `update_pfs` from `RaidenEventHandler` sends balance proof updates to the global
    PATH_FINDING_BROADCASTING_ROOM room on Send($BalanceProof)* events, i.e. events that send
    a new balance proof to the channel partner.
    """
    transport = matrix_transports[0]
    transport._client.api.retry_timeout = 0
    transport._send_raw = MagicMock()
    raiden_service = MockRaidenService(None)

    transport.start(
        raiden_service,
        raiden_service.message_handler,
        None,
    )

    pfs_room_name = make_room_alias(transport.network_id,
                                    PATH_FINDING_BROADCASTING_ROOM)
    pfs_room = transport._global_rooms.get(pfs_room_name)
    assert isinstance(pfs_room, Room)
    pfs_room.send_text = MagicMock(spec=pfs_room.send_text)

    raiden_service.transport = transport
    transport.log = MagicMock()

    # create mock events that should trigger a send
    lock = make_lock()
    hash_time_lock = HashTimeLockState(lock.amount, lock.expiration,
                                       lock.secrethash)

    def make_unsigned_balance_proof(nonce):
        return BalanceProofUnsignedState.from_dict(
            make_balance_proof(nonce=nonce, signer=LocalSigner(HOP1_KEY), amount=1).to_dict(),
        )

    transfer1 = LockedTransferUnsignedState(
        balance_proof=make_unsigned_balance_proof(nonce=1),
        payment_identifier=1,
        token=b'1',
        lock=hash_time_lock,
        target=HOP1,
        initiator=HOP1,
    )
    transfer2 = LockedTransferUnsignedState(
        balance_proof=make_unsigned_balance_proof(nonce=2),
        payment_identifier=1,
        token=b'1',
        lock=hash_time_lock,
        target=HOP1,
        initiator=HOP1,
    )

    send_balance_proof_events = [
        SendLockedTransfer(HOP1, 1, 1, transfer1),
        SendRefundTransfer(HOP1, 1, 1, transfer2),
        SendBalanceProof(HOP1, 1, 1, 1, b'1', b'x' * 32,
                         make_unsigned_balance_proof(nonce=3)),
        SendLockExpired(HOP1, 1, make_unsigned_balance_proof(nonce=4),
                        b'x' * 32),
    ]
    for num, event in enumerate(send_balance_proof_events):
        assert event.balance_proof.nonce == num + 1
    # make sure we cover all configured event types
    assert all(event in [type(event) for event in send_balance_proof_events]
               for event in SEND_BALANCE_PROOF_EVENTS)

    event_handler = raiden_event_handler.RaidenEventHandler()

    # let our mock objects pass validation
    channelstate_mock = Mock()
    channelstate_mock.reveal_timeout = 1

    monkeypatch.setattr(
        raiden_event_handler,
        'get_channelstate_by_token_network_and_partner',
        lambda *args, **kwargs: channelstate_mock,
    )
    monkeypatch.setattr(raiden_event_handler, 'state_from_raiden',
                        lambda *args, **kwargs: 1)
    monkeypatch.setattr(event_handler, 'handle_send_lockedtransfer',
                        lambda *args, **kwargs: 1)
    monkeypatch.setattr(event_handler, 'handle_send_refundtransfer',
                        lambda *args, **kwargs: 1)

    # handle the events
    for event in send_balance_proof_events:
        event_handler.on_raiden_event(
            raiden_service,
            event,
        )
    gevent.idle()

    # ensure all events triggered a send for their respective balance_proof
    # matrix transport may concatenate multiple messages send in one interval
    assert pfs_room.send_text.call_count >= 1
    concatenated_call_args = ' '.join(
        str(arg) for arg in pfs_room.send_text.call_args_list)
    assert all(f'"nonce": {i + 1}' in concatenated_call_args
               for i in range(len(SEND_BALANCE_PROOF_EVENTS)))
    transport.stop()
    transport.get()