def test_retryqueue_not_idle_with_messages(
    mock_matrix: MatrixTransport, retry_interval_initial: float
) -> None:
    """A ``RetryQueue`` holding undelivered messages must never go idle.

    Without the ``all_peers_reachable`` fixture the peer's reachability
    defaults to ``UNREACHABLE``, so the enqueued message stays in the internal
    queue indefinitely and must keep the queue greenlet alive.
    """
    recipient = Address(factories.HOP1)
    retrier = mock_matrix._get_retrier(recipient)
    idle_timeout = RETRY_QUEUE_IDLE_AFTER * retry_interval_initial

    queue_id = QueueIdentifier(
        recipient=recipient,
        canonical_identifier=CANONICAL_IDENTIFIER_UNORDERED_QUEUE,
    )
    retrier.enqueue(queue_id, [make_message()])

    # Sleep well past the idle timeout; the message is still undelivered, so
    # the queue must not have terminated.
    gevent.sleep(idle_timeout + retry_interval_initial * 5)

    assert not retrier.greenlet.ready()
    assert retrier._idle_since == 0
    assert not retrier.is_idle

    # The first queue has never become idle, therefore the transport must
    # hand back the very same object.
    assert mock_matrix._get_retrier(recipient) is retrier
def run_test_regression_transport_global_queues_are_initialized_on_restart_for_services(
    raiden_network, number_of_nodes, token_addresses, network_wait, user_deposit_address
):
    """Regression test: after a restart with monitoring enabled, the transport's
    global message queue must already hold the PFS and MS messages *before* the
    transport starts sending anything.
    """
    app0, app1 = raiden_network
    app0.config["services"]["monitoring_enabled"] = True

    # Send a transfer to make sure the state has a balance proof
    # to publish to the global matrix rooms
    token_address = token_addresses[0]
    amount = 10
    transfer(
        initiator_app=app1,
        target_app=app0,
        token_address=token_address,
        amount=amount,
        identifier=1,
        timeout=network_wait * number_of_nodes,
    )

    app0.stop()

    transport = MatrixTransport(app0.config["transport"]["matrix"])
    # Mock out the send paths so we can assert nothing was sent prematurely.
    transport.send_async = Mock()
    transport._send_raw = Mock()
    old_start_transport = transport.start

    # Check that the queue is populated before the transport sends it and empties the queue
    def start_transport(*args, **kwargs):
        # Before restart the transport's global message queue should be initialized
        # There should be 2 messages in the global queue.
        # 1 for the PFS and the other for MS
        assert len(transport._global_send_queue) == 2
        # No other messages were sent at this point
        transport.send_async.assert_not_called()
        transport._send_raw.assert_not_called()
        old_start_transport(*args, **kwargs)

    transport.start = start_transport

    raiden_event_handler = RaidenEventHandler()
    message_handler = MessageHandler()

    app0_restart = App(
        config=app0.config,
        chain=app0.raiden.chain,
        query_start_block=0,
        default_registry=app0.raiden.default_registry,
        default_one_to_n_address=app0.raiden.default_one_to_n_address,
        default_secret_registry=app0.raiden.default_secret_registry,
        default_service_registry=app0.raiden.default_service_registry,
        transport=transport,
        raiden_event_handler=raiden_event_handler,
        message_handler=message_handler,
        discovery=app0.raiden.discovery,
        user_deposit=app0.raiden.chain.user_deposit(user_deposit_address),
    )
    # Starting triggers `start_transport` above, which performs the assertions.
    app0_restart.start()
def mock_matrix(monkeypatch, mock_raiden_service, retry_interval, retries_before_backoff):
    """Fixture: a ``MatrixTransport`` wired up to mocks so no real homeserver,
    rooms, or user lookups are required.

    The returned transport uses ``mock_raiden_service`` as its Raiden service,
    reports every user as ONLINE, and treats the client as already synced.
    """
    from raiden.network.transport.matrix.client import GMatrixClient
    from raiden.network.transport.matrix.utils import UserPresence
    from raiden.network.transport.matrix import transport as transport_module

    def make_client_monkey(
        handle_messages_callback, servers, *args, **kwargs
    ):  # pylint: disable=unused-argument
        # Always connect to the first server; skip the server-selection logic.
        return GMatrixClient(handle_messages_callback, servers[0])

    monkeypatch.setattr(User, "get_display_name", lambda _: "random_display_name")
    monkeypatch.setattr(transport_module, "make_client", make_client_monkey)

    def mock_get_room_ids_for_address(  # pylint: disable=unused-argument
        klass, address: Address
    ) -> List[str]:
        # Pretend every address already has one room.
        return ["!roomID:server"]

    def mock_set_room_id_for_address(  # pylint: disable=unused-argument
        self, address: Address, room_id: Optional[str]
    ):
        pass

    def mock_on_messages(messages):  # pylint: disable=unused-argument
        # Only sanity-check incoming messages; processing is out of scope here.
        for message in messages:
            assert message
            assert message.sender

    def mock_get_user_presence(self, user_id: str):
        return UserPresence.ONLINE

    config = MatrixTransportConfig(
        broadcast_rooms=[],
        retries_before_backoff=retries_before_backoff,
        retry_interval=retry_interval,
        server="http://none",
        server_name="none",
        available_servers=[],
    )

    transport = MatrixTransport(config=config, environment=Environment.DEVELOPMENT)
    transport._raiden_service = mock_raiden_service
    transport._stop_event.clear()
    # Pre-register the peer's user id so no lookup is attempted.
    transport._address_mgr.add_userid_for_address(factories.HOP1, USERID1)
    transport._client.user_id = USERID0

    monkeypatch.setattr(
        MatrixTransport, "_get_room_ids_for_address", mock_get_room_ids_for_address
    )
    monkeypatch.setattr(MatrixTransport, "_set_room_id_for_address", mock_set_room_id_for_address)
    monkeypatch.setattr(transport._raiden_service, "on_messages", mock_on_messages)
    monkeypatch.setattr(GMatrixClient, "get_user_presence", mock_get_user_presence)
    monkeypatch.setattr(transport._client.api, "leave_room", lambda room_id: None)
    # Mark the client as synced so the transport skips the initial sync.
    monkeypatch.setattr(transport._client, "sync_token", "already_synced")

    return transport
def mock_matrix(
    monkeypatch,
    retry_interval,
    retries_before_backoff,
    local_matrix_servers,
    private_rooms,
):
    """Fixture: a ``MatrixTransport`` with user/room lookups and message
    receiving monkeypatched so the transport can be unit tested against a
    local matrix server without real peers.
    """
    from matrix_client.user import User
    monkeypatch.setattr(User, 'get_display_name', lambda _: 'random_display_name')

    def mock_get_user(klass, user: Union[User, str]) -> User:
        # Every lookup resolves to the same test user.
        return User(None, USERID1)

    def mock_get_room_ids_for_address(
        klass,
        address: Address,
        filter_private: bool = None,
    ) -> List[str]:
        return ['!roomID:server']

    def mock_set_room_id_for_address(self, address: Address, room_id: Optional[str]):
        pass

    def mock_receive_message(klass, message):
        # We are just unit testing the matrix transport receive so do nothing
        assert message

    # NOTE(review): defined but never installed via monkeypatch below — the
    # real `_receive_delivered` still runs; confirm whether that is intended.
    def mock_receive_delivered(klass, delivered):
        # We are just unit testing the matrix transport receive so do nothing
        assert delivered

    config = dict(
        retry_interval=retry_interval,
        retries_before_backoff=retries_before_backoff,
        server=local_matrix_servers[0],
        server_name=local_matrix_servers[0].netloc,
        available_servers=[],
        discovery_room='discovery',
        private_rooms=private_rooms,
    )

    transport = MatrixTransport(config)
    transport._raiden_service = MockRaidenService()
    transport._stop_event.clear()
    # Pre-register the peer's user id so no lookup is attempted.
    transport._address_to_userids[HOP1] = USERID1

    monkeypatch.setattr(MatrixTransport, '_get_user', mock_get_user)
    monkeypatch.setattr(
        MatrixTransport,
        '_get_room_ids_for_address',
        mock_get_room_ids_for_address,
    )
    monkeypatch.setattr(MatrixTransport, '_set_room_id_for_address', mock_set_room_id_for_address)
    monkeypatch.setattr(MatrixTransport, '_receive_message', mock_receive_message)

    return transport
def mock_matrix(monkeypatch, retry_interval, retries_before_backoff):
    """Fixture: a ``MatrixTransport`` built against a dummy server URL with
    client construction, user/room lookups, and message receipt monkeypatched
    out, suitable for pure unit tests.
    """
    from raiden.network.transport.matrix.client import User
    from raiden.network.transport.matrix import transport as transport_module

    monkeypatch.setattr(User, "get_display_name", lambda _: "random_display_name")
    # Always build a client against the first configured server.
    monkeypatch.setattr(
        transport_module, "make_client", lambda url, *a, **kw: GMatrixClient(url[0])
    )

    def mock_get_user(klass, user: Union[User, str]) -> User:  # pylint: disable=unused-argument
        return User(None, USERID1)

    def mock_get_room_ids_for_address(  # pylint: disable=unused-argument
        klass, address: Address, filter_private: bool = None
    ) -> List[str]:
        return ["!roomID:server"]

    def mock_set_room_id_for_address(  # pylint: disable=unused-argument
        self, address: Address, room_id: Optional[str]
    ):
        pass

    def mock_receive_message(klass, message):  # pylint: disable=unused-argument
        # We are just unit testing the matrix transport receive so do nothing
        assert message
        assert message.sender

    config = dict(
        retry_interval=retry_interval,
        retries_before_backoff=retries_before_backoff,
        server="http://none",
        server_name="none",
        available_servers=[],
        global_rooms=[],
        private_rooms=False,
    )

    transport = MatrixTransport(config)
    transport._raiden_service = MockRaidenService()
    transport._stop_event.clear()
    # Pre-register the peer's user id so no lookup is attempted.
    transport._address_mgr.add_userid_for_address(factories.HOP1, USERID1)
    transport._client.user_id = USERID0

    monkeypatch.setattr(MatrixTransport, "_get_user", mock_get_user)
    monkeypatch.setattr(MatrixTransport, "_get_room_ids_for_address", mock_get_room_ids_for_address)
    monkeypatch.setattr(MatrixTransport, "_set_room_id_for_address", mock_set_room_id_for_address)
    monkeypatch.setattr(MatrixTransport, "_receive_message", mock_receive_message)

    return transport
def test_matrix_discovery_room_offline_server(
    local_matrix_servers,
    retries_before_backoff,
    retry_interval,
    private_rooms,
):
    """The transport must start and stop cleanly even when one of the
    configured available servers is unreachable.
    """
    config = {
        'discovery_room': 'discovery',
        'retries_before_backoff': retries_before_backoff,
        'retry_interval': retry_interval,
        'server': local_matrix_servers[0],
        'server_name': local_matrix_servers[0].netloc,
        # Second entry points at a port nothing listens on.
        'available_servers': [local_matrix_servers[0], 'https://localhost:1'],
        'private_rooms': private_rooms,
    }
    transport = MatrixTransport(config)

    transport.start(MockRaidenService(None), MessageHandler(set()), '')
    gevent.sleep(0.2)
    transport.stop()
    transport.get()
def test_retry_queue_does_not_resend_removed_messages( mock_matrix: MatrixTransport, retry_interval_initial: float) -> None: """ Ensure the ``RetryQueue`` doesn't unnecessarily re-send messages. Messages should only be retried while they are present in the respective Raiden queue. Once they have been removed they should not be sent again. In the past they could have been sent twice. See: https://github.com/raiden-network/raiden/issue/4111 """ # Pretend the Transport greenlet is running mock_matrix.greenlet = True # This is intentionally not using ``MatrixTransport._get_retrier()`` since we don't want the # greenlet to run but instead manually call its `_check_and_send()` method. retry_queue = _RetryQueue(transport=mock_matrix, receiver=Address(factories.HOP1)) message = make_message() serialized_message = MessageSerializer.serialize(message) queue_identifier = QueueIdentifier( recipient=Address(factories.HOP1), canonical_identifier=CANONICAL_IDENTIFIER_UNORDERED_QUEUE, ) retry_queue.enqueue(queue_identifier, [message]) # TODO: Fix the code below, the types are not matching. mock_matrix._queueids_to_queues[queue_identifier] = [message ] # type: ignore with retry_queue._lock: retry_queue._check_and_send() assert len(mock_matrix.sent_messages) == 1 # type: ignore assert (factories.HOP1, serialized_message) in mock_matrix.sent_messages # type: ignore mock_matrix._queueids_to_queues[queue_identifier].clear() # Make sure the retry interval has elapsed gevent.sleep(retry_interval_initial * 5) with retry_queue._lock: # The message has been removed from the raiden queue and should therefore not be sent again retry_queue._check_and_send() assert len(mock_matrix.sent_messages) == 1 # type: ignore
def matrix_transports(
    local_matrix_servers,
    retries_before_backoff,
    retry_interval,
    private_rooms,
    number_of_transports,
    global_rooms,
):
    """Fixture: yield ``number_of_transports`` transports distributed
    round-robin over the local matrix servers; stop and join all of them on
    teardown.
    """
    transports = []
    for index in range(number_of_transports):
        # Round-robin assignment of transports to servers.
        server = local_matrix_servers[index % len(local_matrix_servers)]
        config = {
            'global_rooms': global_rooms,
            'retries_before_backoff': retries_before_backoff,
            'retry_interval': retry_interval,
            'server': server,
            'server_name': server.netloc,
            'available_servers': local_matrix_servers,
            'private_rooms': private_rooms,
        }
        transports.append(MatrixTransport(config))

    yield transports

    # Teardown: stop everything first, then wait for the greenlets to exit.
    for transport in transports:
        transport.stop()
    for transport in transports:
        transport.get()
def matrix_transports(
    local_matrix_servers,
    retries_before_backoff,
    retry_interval,
    private_rooms,
    number_of_transports,
    global_rooms,
):
    """Fixture: yield ``number_of_transports`` transports distributed
    round-robin over the local matrix servers, each with its own
    ``private_rooms`` flag; stop and join the started ones on teardown.
    """
    transports = []
    for index in range(number_of_transports):
        # Round-robin assignment of transports to servers.
        server = local_matrix_servers[index % len(local_matrix_servers)]
        config = {
            "global_rooms": global_rooms,
            "retries_before_backoff": retries_before_backoff,
            "retry_interval": retry_interval,
            "server": server,
            "server_name": server.netloc,
            "available_servers": local_matrix_servers,
            # Per-transport privacy setting.
            "private_rooms": private_rooms[index],
        }
        transports.append(MatrixTransport(config))

    yield transports

    for transport in transports:
        transport.stop()
    for transport in transports:
        # Calling `get()` on a never started Greenlet will block forever
        if transport._started:
            transport.get()
def _setup_matrix(config: Dict[str, Any], routing_mode: RoutingMode) -> MatrixTransport:
    """Configure and instantiate the Matrix transport.

    Fills in the list of available servers when not explicitly configured and
    registers the broadcast rooms required by the PFS (unless routing is
    private) and the monitoring service (when enabled).

    Exits the process with status 1 if the transport cannot be created.
    """
    matrix_config = config["transport"]["matrix"]

    if matrix_config.get("available_servers") is None:
        # fetch list of known servers from raiden-network/raiden-transport repo
        available_servers_url = DEFAULT_MATRIX_KNOWN_SERVERS[config["environment_type"]]
        available_servers = get_matrix_servers(available_servers_url)
        log.debug("Fetching available matrix servers", available_servers=available_servers)
        matrix_config["available_servers"] = available_servers

    # Add PFS broadcast room when not in private mode
    if routing_mode != RoutingMode.PRIVATE:
        if PATH_FINDING_BROADCASTING_ROOM not in matrix_config["broadcast_rooms"]:
            matrix_config["broadcast_rooms"].append(PATH_FINDING_BROADCASTING_ROOM)

    # Add monitoring service broadcast room if enabled.
    # Guard against duplicate entries for consistency with the PFS room
    # handling above (the config object may be reused across restarts).
    if config["services"]["monitoring_enabled"] is True:
        if MONITORING_BROADCASTING_ROOM not in matrix_config["broadcast_rooms"]:
            matrix_config["broadcast_rooms"].append(MONITORING_BROADCASTING_ROOM)

    try:
        transport = MatrixTransport(matrix_config)
    except RaidenError as ex:
        click.secho(f"FATAL: {ex}", fg="red")
        sys.exit(1)

    return transport
def setup_matrix(
    transport_config: MatrixTransportConfig,
    services_config: ServiceConfig,
    environment_type: Environment,
    routing_mode: RoutingMode,
) -> MatrixTransport:
    """Configure and instantiate the Matrix transport.

    Resolves the server list when auto-selection is requested and registers
    the broadcast rooms required by the PFS (unless routing is private) and
    the monitoring service (when enabled).
    """
    if transport_config.server == MATRIX_AUTO_SELECT_SERVER:
        # fetch list of known servers from raiden-network/raiden-transport repo
        available_servers_url = DEFAULT_MATRIX_KNOWN_SERVERS[environment_type]
        log.debug("Fetching available matrix servers")
        available_servers = get_matrix_servers(available_servers_url)
        log.debug("Available matrix servers", available_servers=available_servers)
        transport_config.available_servers = available_servers

    # Add PFS broadcast room when not in private mode
    if routing_mode != RoutingMode.PRIVATE:
        if PATH_FINDING_BROADCASTING_ROOM not in transport_config.broadcast_rooms:
            transport_config.broadcast_rooms.append(PATH_FINDING_BROADCASTING_ROOM)

    # Add monitoring service broadcast room if enabled.
    # Guard against duplicate entries for consistency with the PFS room
    # handling above (the config object may be reused across restarts).
    if services_config.monitoring_enabled is True:
        if MONITORING_BROADCASTING_ROOM not in transport_config.broadcast_rooms:
            transport_config.broadcast_rooms.append(MONITORING_BROADCASTING_ROOM)

    return MatrixTransport(config=transport_config, environment=environment_type)
def restart_app(app: App, restart_node: RestartNode) -> App:
    """Build a replacement ``App`` reusing ``app``'s configuration and
    on-chain dependencies, restart it via ``restart_node`` and return it.
    """
    transport = MatrixTransport(
        config=app.raiden.config.transport,
        environment=app.raiden.config.environment_type,
    )
    # Wrap the event handler so tests can hold back selected events.
    event_handler = HoldRaidenEventHandler(RaidenEventHandler())

    restarted = App(
        config=app.config,
        rpc_client=app.raiden.rpc_client,
        proxy_manager=app.raiden.proxy_manager,
        query_start_block=BlockNumber(0),
        default_one_to_n_address=app.raiden.default_one_to_n_address,
        default_registry=app.raiden.default_registry,
        default_secret_registry=app.raiden.default_secret_registry,
        default_service_registry=app.raiden.default_service_registry,
        default_msc_address=app.raiden.default_msc_address,
        transport=transport,
        raiden_event_handler=event_handler,
        message_handler=MessageHandler(),
        routing_mode=RoutingMode.PRIVATE,
    )
    restart_node(restarted)
    return restarted
def _setup_matrix(config):
    """Create the Matrix transport from ``config``.

    Populates the list of available servers when not explicitly configured and
    registers the global (broadcast) rooms required by the enabled services.
    Exits the process with status 1 when the transport cannot be created.
    """
    if config["transport"]["matrix"].get("available_servers") is None:
        # fetch list of known servers from raiden-network/raiden-tranport repo
        available_servers_url = DEFAULT_MATRIX_KNOWN_SERVERS[config["environment_type"]]
        available_servers = get_matrix_servers(available_servers_url)
        log.debug("Fetching available matrix servers", available_servers=available_servers)
        config["transport"]["matrix"]["available_servers"] = available_servers

    # TODO: This needs to be adjusted once #3735 gets implemented
    # Add PFS broadcast room if enabled
    if config["services"]["pathfinding_service_address"] is not None:
        if PATH_FINDING_BROADCASTING_ROOM not in config["transport"]["matrix"]["global_rooms"]:
            config["transport"]["matrix"]["global_rooms"].append(PATH_FINDING_BROADCASTING_ROOM)

    # Add monitoring service broadcast room if enabled
    # NOTE(review): unlike the PFS room above, this append has no duplicate
    # guard — confirm this function cannot run twice on the same config object.
    if config["services"]["monitoring_enabled"] is True:
        config["transport"]["matrix"]["global_rooms"].append(MONITORING_BROADCASTING_ROOM)

    try:
        transport = MatrixTransport(config["transport"]["matrix"])
    except RaidenError as ex:
        click.secho(f"FATAL: {ex}", fg="red")
        sys.exit(1)

    return transport
def test_recovery_blockchain_events(raiden_network, restart_node, token_addresses, network_wait):
    """ Close one of the two raiden apps that have a channel between them, have the
    counterparty close the channel and then make sure the restarted app sees the change
    """
    app0, app1 = raiden_network
    token_address = token_addresses[0]

    app0.raiden.stop()

    new_transport = MatrixTransport(
        config=app0.raiden.config.transport, environment=app0.raiden.config.environment_type
    )

    # While app0 is offline, have the partner close the channel on-chain.
    app1_api = RaidenAPI(app1.raiden)
    app1_api.channel_close(
        registry_address=app0.raiden.default_registry.address,
        token_address=token_address,
        partner_address=app0.raiden.address,
    )

    app0.stop()

    raiden_event_handler = RaidenEventHandler()
    message_handler = MessageHandler()

    app0_restart = App(
        config=app0.config,
        rpc_client=app0.raiden.rpc_client,
        proxy_manager=app0.raiden.proxy_manager,
        query_start_block=BlockNumber(0),
        default_registry=app0.raiden.default_registry,
        default_one_to_n_address=app0.raiden.default_one_to_n_address,
        default_secret_registry=app0.raiden.default_secret_registry,
        default_service_registry=app0.raiden.default_service_registry,
        default_msc_address=app0.raiden.default_msc_address,
        transport=new_transport,
        raiden_event_handler=raiden_event_handler,
        message_handler=message_handler,
        routing_mode=RoutingMode.PRIVATE,
    )
    del app0  # from here on the app0_restart should be used
    restart_node(app0_restart)
    wal = app0_restart.raiden.wal
    assert wal

    # wait for the nodes' healthcheck to update the network statuses
    waiting.wait_for_healthy(app0_restart.raiden, app1.raiden.address, network_wait)
    waiting.wait_for_healthy(app1.raiden, app0_restart.raiden.address, network_wait)

    # The restarted node must have picked up the on-chain close event.
    restarted_state_changes = wal.storage.get_statechanges_by_range(RANGE_ALL_STATE_CHANGES)
    assert search_for_item(restarted_state_changes, ContractReceiveChannelClosed, {})
def test_retryqueue_idle_terminate(mock_matrix: MatrixTransport, retry_interval_initial: float):
    """ Ensure ``RetryQueue``s exit if they are idle for too long. """
    retry_queue = mock_matrix._get_retrier(Address(factories.HOP1))
    idle_after = RETRY_QUEUE_IDLE_AFTER * retry_interval_initial

    with Timeout(idle_after + (retry_interval_initial * 5)):
        # Retry
        # `joinall` returns the list of greenlets that finished within the
        # given timeout; loop until the retry queue's greenlet has exited.
        while not gevent.joinall({retry_queue.greenlet}, idle_after / 2, raise_error=True):
            pass

    assert retry_queue.greenlet.ready()
    # The idle counter must have reached the termination threshold.
    assert retry_queue._idle_since == RETRY_QUEUE_IDLE_AFTER
    assert retry_queue.is_idle

    retry_queue_2 = mock_matrix._get_retrier(Address(factories.HOP1))
    # Since the initial RetryQueue has exited `get_retrier()` must return a new instance
    assert retry_queue_2 is not retry_queue
def test_reject_invite_of_invalid_room(
    mock_matrix: MatrixTransport, monkeypatch, signer, invite_state
):
    """An invite to a room on an invalid server must be rejected: the handler
    raises and the transport leaves the offending room.
    """
    bad_room_id = RoomID("!someroom:invalidserver")
    inviter = create_new_users_for_address(signer)[0]
    mock_matrix._displayname_cache.warm_users([inviter])

    left_rooms = []

    def record_leave_room(room_id):
        # Track which rooms the transport attempted to leave.
        left_rooms.append(room_id)

    monkeypatch.setattr(mock_matrix._client.api, "leave_room", record_leave_room)

    with pytest.raises(AssertionError):
        mock_matrix._handle_invite(bad_room_id, invite_state)

    assert bad_room_id in left_rooms
def test_leave_unexpected_rooms(mock_matrix: MatrixTransport, room_with_members, api_available):
    """During room-inventory initialization, unexpected rooms are left; an API
    failure while leaving must surface as a ``TransportError``.
    """
    room, should_leave = room_with_members
    room.client = mock_matrix._client
    mock_matrix._client.rooms[room.room_id] = room

    if not api_available:

        def raise_ex(room_id):
            raise MatrixRequestError()

        # Simulate the homeserver API failing on leave.
        mock_matrix._client.api.leave_room = raise_ex

    # If an API error happens, a MatrixRequestError is propagated and raised as a TransportError.
    # This should only happen when a room is to be left
    if not api_available and should_leave:
        with pytest.raises(TransportError):
            mock_matrix._initialize_room_inventory()
    else:
        assert len(mock_matrix._client.rooms) == 1
        mock_matrix._initialize_room_inventory()
        # Conditional assert: rooms must be empty iff the room was to be left.
        assert not mock_matrix._client.rooms if should_leave else mock_matrix._client.rooms
def _setup_matrix(config):
    """Build the Matrix transport from ``config``, fetching the list of
    available servers when it is not explicitly configured.  Exits the
    process with status 1 when the transport cannot be created.
    """
    matrix_config = config['transport']['matrix']

    if matrix_config.get('available_servers') is None:
        # fetch list of known servers from raiden-network/raiden-transport repo
        servers_url = DEFAULT_MATRIX_KNOWN_SERVERS[config['network_type']]
        matrix_config['available_servers'] = get_matrix_servers(servers_url)

    try:
        transport = MatrixTransport(matrix_config)
    except RaidenError as ex:
        click.secho(f'FATAL: {ex}', fg='red')
        sys.exit(1)

    return transport
def _setup_matrix(config):
    """Create the node's composite transport.

    Populates the available-server list and global rooms, loads the stored
    light clients from the SQLite database, builds one transport per light
    client plus a hub transport, and wraps them in a ``NodeTransport``.
    Exits the process with status 1 on transport construction failure.
    """
    if config["transport"]["matrix"].get("available_servers") is None:
        # fetch list of known servers from raiden-network/raiden-tranport repo
        available_servers_url = DEFAULT_MATRIX_KNOWN_SERVERS[config["environment_type"]]
        available_servers = get_matrix_servers(available_servers_url)
        log.debug("Fetching available matrix servers", available_servers=available_servers)
        config["transport"]["matrix"]["available_servers"] = available_servers

    # TODO: This needs to be adjusted once #3735 gets implemented
    # Add PFS broadcast room if enabled
    if config["services"]["pathfinding_service_address"] is not None:
        if PATH_FINDING_BROADCASTING_ROOM not in config["transport"]["matrix"]["global_rooms"]:
            config["transport"]["matrix"]["global_rooms"].append(PATH_FINDING_BROADCASTING_ROOM)

    # Add monitoring service broadcast room if enabled
    if config["services"]["monitoring_enabled"] is True:
        config["transport"]["matrix"]["global_rooms"].append(MONITORING_BROADCASTING_ROOM)

    try:
        database_path = config["database_path"]
        database_dir = os.path.dirname(config["database_path"])
        os.makedirs(database_dir, exist_ok=True)

        # NOTE(review): this storage handle is never closed here — confirm
        # whether SerializedSQLiteStorage needs explicit closing.
        storage = sqlite.SerializedSQLiteStorage(
            database_path=database_path, serializer=serialize.JSONSerializer()
        )
        light_clients = storage.get_all_light_clients()

        # One dedicated transport per registered light client.
        light_client_transports = []
        for light_client in light_clients:
            light_client_transport = get_matrix_light_client_instance(
                config["transport"]["matrix"],
                light_client['password'],
                light_client['display_name'],
                light_client['seed_retry'],
                light_client['address'],
            )
            light_client_transports.append(light_client_transport)

        hub_transport = MatrixTransport(config["transport"]["matrix"])
        node_transport = NodeTransport(hub_transport, light_client_transports)
    except RaidenError as ex:
        click.secho(f"FATAL: {ex}", fg="red")
        sys.exit(1)

    return node_transport
def setup_matrix(
    transport_config: MatrixTransportConfig,
    services_config: ServiceConfig,
    environment_type: Environment,
    routing_mode: RoutingMode,
) -> MatrixTransport:
    """Register the required broadcast rooms and instantiate the transport.

    Adds the PFS broadcast room unless routing is private, and the monitoring
    service broadcast room when monitoring is enabled.
    """
    # Add PFS broadcast room when not in private mode
    if routing_mode != RoutingMode.PRIVATE:
        if PATH_FINDING_BROADCASTING_ROOM not in transport_config.broadcast_rooms:
            transport_config.broadcast_rooms.append(PATH_FINDING_BROADCASTING_ROOM)

    # Add monitoring service broadcast room if enabled.
    # Guard against duplicate entries for consistency with the PFS room
    # handling above (the config object may be reused across restarts).
    if services_config.monitoring_enabled is True:
        if MONITORING_BROADCASTING_ROOM not in transport_config.broadcast_rooms:
            transport_config.broadcast_rooms.append(MONITORING_BROADCASTING_ROOM)

    return MatrixTransport(config=transport_config, environment=environment_type)
def _setup_matrix(config):
    """Build the Matrix transport from ``config``.

    Fetches the available-server list when not explicitly configured and adds
    the monitoring broadcast room when monitoring is enabled.  Exits the
    process with status 1 when the transport cannot be created.
    """
    matrix_config = config['transport']['matrix']

    if matrix_config.get('available_servers') is None:
        # fetch list of known servers from raiden-network/raiden-transport repo
        available_servers_url = DEFAULT_MATRIX_KNOWN_SERVERS[config['environment_type']]
        available_servers = get_matrix_servers(available_servers_url)
        log.debug('Fetching available matrix servers', available_servers=available_servers)
        matrix_config['available_servers'] = available_servers

    # Add monitoring service broadcast room if enabled
    if config['services']['monitoring_enabled'] is True:
        matrix_config['global_rooms'].append(MONITORING_BROADCASTING_ROOM)

    try:
        transport = MatrixTransport(matrix_config)
    except RaidenError as ex:
        click.secho(f'FATAL: {ex}', fg='red')
        sys.exit(1)

    return transport
def restart_app(app: App) -> App:
    """Build a replacement ``App`` with a fresh transport from ``app``'s
    configuration, start it and return it.
    """
    transport = MatrixTransport(app.raiden.config["transport"]["matrix"])

    restarted = App(
        config=app.config,
        rpc_client=app.raiden.rpc_client,
        proxy_manager=app.raiden.proxy_manager,
        query_start_block=BlockNumber(0),
        default_one_to_n_address=app.raiden.default_one_to_n_address,
        default_registry=app.raiden.default_registry,
        default_secret_registry=app.raiden.default_secret_registry,
        default_service_registry=app.raiden.default_service_registry,
        default_msc_address=app.raiden.default_msc_address,
        transport=transport,
        raiden_event_handler=RaidenEventHandler(),
        message_handler=MessageHandler(),
        routing_mode=RoutingMode.PRIVATE,
    )
    restarted.start()
    return restarted
def matrix_transports(
    local_matrix_servers: List[ParsedURL],
    retries_before_backoff: int,
    retry_interval_initial: float,
    retry_interval_max: float,
    number_of_transports: int,
    broadcast_rooms: List[str],
    matrix_sync_timeout: int,
    capabilities: CapabilitiesConfig,
) -> Iterable[List[MatrixTransport]]:
    """Fixture: yield ``number_of_transports`` transports distributed
    round-robin over the local matrix servers; stop and join the started
    ones on teardown.
    """
    transports = []
    local_matrix_servers_str = [str(server) for server in local_matrix_servers]
    for transport_index in range(number_of_transports):
        # Round-robin assignment of transports to servers.
        server = local_matrix_servers[transport_index % len(local_matrix_servers)]
        transports.append(
            MatrixTransport(
                config=MatrixTransportConfig(
                    # Copy so each transport mutates its own room list.
                    broadcast_rooms=broadcast_rooms.copy(),
                    retries_before_backoff=retries_before_backoff,
                    retry_interval_initial=retry_interval_initial,
                    retry_interval_max=retry_interval_max,
                    server=server,
                    available_servers=local_matrix_servers_str,
                    sync_timeout=matrix_sync_timeout,
                    capabilities_config=capabilities,
                ),
                environment=Environment.DEVELOPMENT,
            )
        )

    yield transports

    for transport in transports:
        transport.stop()
    for transport in transports:
        # Calling `get()` on a never started Greenlet will block forever
        if transport._started:
            transport.greenlet.get()
def matrix_transports(local_matrix_servers, retries_before_backoff, retry_interval, private_rooms):
    """Fixture: yield one transport per local matrix server; stop and join
    all of them on teardown.
    """
    transports = [
        MatrixTransport({
            'discovery_room': 'discovery',
            'retries_before_backoff': retries_before_backoff,
            'retry_interval': retry_interval,
            'server': server,
            'server_name': server.netloc,
            'available_servers': local_matrix_servers,
            'private_rooms': private_rooms,
        })
        for server in local_matrix_servers
    ]

    yield transports

    # Teardown: stop everything first, then wait for the greenlets to exit.
    for transport in transports:
        transport.stop()
    for transport in transports:
        transport.get()
def run_test_send_queued_messages(
    raiden_network,
    deposit,
    token_addresses,
    network_wait,
):
    """Queue transfers while the partner is offline, restart both nodes, and
    verify the queued transfers are delivered and the channel balances
    converge.
    """
    app0, app1 = raiden_network
    token_address = token_addresses[0]
    chain_state = views.state_from_app(app0)
    payment_network_id = app0.raiden.default_registry.address
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        chain_state,
        payment_network_id,
        token_address,
    )

    with dont_handle_node_change_network_state():
        # stop app1 - transfer must be left unconfirmed
        app1.stop()

        # make a few transfers from app0 to app1
        amount = 1
        spent_amount = 7
        identifier = 1
        for _ in range(spent_amount):
            app0.raiden.mediated_transfer_async(
                token_network_identifier=token_network_identifier,
                amount=amount,
                target=app1.raiden.address,
                identifier=identifier,
            )
            identifier += 1

    # restart app0
    app0.raiden.stop()

    new_transport = MatrixTransport(
        app0.raiden.config['transport']['matrix'],
    )

    raiden_event_handler = RaidenEventHandler()
    message_handler = MessageHandler()

    app0_restart = App(
        config=app0.config,
        chain=app0.raiden.chain,
        query_start_block=0,
        default_registry=app0.raiden.default_registry,
        default_secret_registry=app0.raiden.default_secret_registry,
        default_service_registry=app0.raiden.default_service_registry,
        transport=new_transport,
        raiden_event_handler=raiden_event_handler,
        message_handler=message_handler,
        discovery=app0.raiden.discovery,
    )
    app0.stop()
    del app0  # from here on the app0_restart should be used
    app1.start()
    app0_restart.start()

    # wait for the nodes' healthcheck to see each other again
    waiting.wait_for_healthy(
        app0_restart.raiden,
        app1.raiden.address,
        network_wait,
    )
    waiting.wait_for_healthy(
        app1.raiden,
        app0_restart.raiden.address,
        network_wait,
    )

    exception = RuntimeError('Timeout while waiting for new channel')
    with gevent.Timeout(5, exception=exception):
        waiting.wait_for_newchannel(
            raiden=app0_restart.raiden,
            payment_network_id=payment_network_id,
            token_address=token_address,
            partner_address=app1.raiden.address,
            retry_timeout=network_wait,
        )
    exception = RuntimeError('Timeout while waiting for balance update for app0')
    with gevent.Timeout(30, exception=exception):
        waiting.wait_for_payment_balance(
            raiden=app0_restart.raiden,
            payment_network_id=payment_network_id,
            token_address=token_address,
            partner_address=app1.raiden.address,
            target_address=app1.raiden.address,
            target_balance=spent_amount,
            retry_timeout=network_wait,
        )
    waiting.wait_for_payment_balance(
        raiden=app1.raiden,
        payment_network_id=payment_network_id,
        token_address=token_address,
        partner_address=app0_restart.raiden.address,
        target_address=app1.raiden.address,
        target_balance=spent_amount,
        retry_timeout=network_wait,
    )

    # Both sides must agree on the final, fully transferred balances.
    assert_synced_channel_state(
        token_network_identifier,
        app0_restart,
        deposit - spent_amount,
        [],
        app1,
        deposit + spent_amount,
        [],
    )
def run_test_payment_statuses_are_restored(
    raiden_network,
    token_addresses,
    network_wait,
):
    """Start several payments whose completion is held back, restart the
    sender, and verify the pending payment statuses are restored from the WAL
    and eventually complete once both nodes are back online.
    """
    app0, app1 = raiden_network
    token_address = token_addresses[0]
    chain_state = views.state_from_app(app0)
    payment_network_id = app0.raiden.default_registry.address
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        chain_state,
        payment_network_id,
        token_address,
    )

    # Hold the secret reveal so the transfers cannot complete before restart.
    app0.event_handler = HoldRaidenEvent()
    app0.event_handler.hold(SendSecretReveal, {})

    # make a few transfers from app0 to app1
    amount = 1
    spent_amount = 7
    identifier = 1
    for identifier in range(spent_amount):
        identifier = identifier + 1
        payment_status = app0.raiden.mediated_transfer_async(
            token_network_identifier=token_network_identifier,
            amount=amount,
            target=app1.raiden.address,
            identifier=identifier,
        )
        assert payment_status.payment_identifier == identifier

    raiden_event_handler = RaidenEventHandler()
    message_handler = MessageHandler()

    app0_restart = App(
        config=app0.config,
        chain=app0.raiden.chain,
        query_start_block=0,
        default_registry=app0.raiden.default_registry,
        default_secret_registry=app0.raiden.default_secret_registry,
        default_service_registry=app0.raiden.default_service_registry,
        transport=MatrixTransport(
            app0.raiden.config['transport']['matrix'],
        ),
        raiden_event_handler=raiden_event_handler,
        message_handler=message_handler,
        discovery=app0.raiden.discovery,
    )
    app0.stop()
    del app0  # from here on the app0_restart should be used

    # stop app1 to make sure that we don't complete the transfers before our checks
    app1.stop()
    app0_restart.start()

    # Check that the payment statuses were restored properly after restart
    for identifier in range(spent_amount):
        identifier = identifier + 1
        mapping = app0_restart.raiden.targets_to_identifiers_to_statuses
        status = mapping[app1.raiden.address][identifier]
        assert status.amount == 1
        assert status.payment_identifier == identifier
        assert status.token_network_identifier == token_network_identifier

    app1.start()  # now that our checks are done start app1 again
    waiting.wait_for_healthy(
        app0_restart.raiden,
        app1.raiden.address,
        network_wait,
    )
    waiting.wait_for_payment_balance(
        raiden=app1.raiden,
        payment_network_id=payment_network_id,
        token_address=token_address,
        partner_address=app0_restart.raiden.address,
        target_address=app1.raiden.address,
        target_balance=spent_amount,
        retry_timeout=network_wait,
    )

    # Check that payments are completed after both nodes come online after restart
    for identifier in range(spent_amount):
        assert raiden_events_search_for_item(
            app0_restart.raiden,
            EventPaymentSentSuccess,
            {
                'identifier': identifier + 1,
                'amount': 1,
            },
        )
def test_matrix_message_retry(
        local_matrix_servers,
        private_rooms,
        retry_interval,
        retries_before_backoff,
):
    """ Test the retry mechanism implemented into the matrix client.
    The test creates a transport and sends a message. Given that the
    receiver was online, the initial message is sent but the receiver
    doesn't respond in time and goes offline. The retrier should then
    wait for the `retry_interval` duration to pass and send the message
    again but this won't work because the receiver is offline. Once
    the receiver comes back again, the message should be sent again.
    """
    partner_address = make_address()

    transport = MatrixTransport({
        'discovery_room': 'discovery',
        'retries_before_backoff': retries_before_backoff,
        'retry_interval': retry_interval,
        'server': local_matrix_servers[0],
        'server_name': local_matrix_servers[0].netloc,
        'available_servers': [],
        'private_rooms': private_rooms,
    })
    # Mock the raw send so no real matrix traffic happens and calls can be counted.
    transport._send_raw = MagicMock()

    raiden_service = MockRaidenService(None)

    transport.start(
        raiden_service,
        raiden_service.message_handler,
        None,
    )
    transport.log = MagicMock()

    # Receiver is online
    transport._address_to_presence[partner_address] = UserPresence.ONLINE

    queueid = QueueIdentifier(
        recipient=partner_address,
        channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE,
    )
    chain_state = raiden_service.wal.state_manager.current_state

    retry_queue: _RetryQueue = transport._get_retrier(partner_address)
    assert bool(retry_queue), 'retry_queue not running'

    # Send the initial message
    message = Processed(0)
    message.sign(transport._raiden_service.private_key)
    chain_state.queueids_to_queues[queueid] = [message]
    retry_queue.enqueue_global(message)

    gevent.sleep(1)

    # FIX: this was `transport._send_raw.call_count = 1`, an *assignment*
    # that overwrote the mock's counter instead of checking it, so the test
    # never verified the initial send. Assert that exactly one raw send
    # happened while the receiver was online.
    assert transport._send_raw.call_count == 1

    # Receiver goes offline
    transport._address_to_presence[partner_address] = UserPresence.OFFLINE

    gevent.sleep(retry_interval)

    transport.log.debug.assert_called_with(
        'Partner not reachable. Skipping.',
        partner=pex(partner_address),
        status=UserPresence.OFFLINE,
    )

    # Retrier did not call send_raw given that the receiver is still offline
    assert transport._send_raw.call_count == 1

    # Receiver comes back online
    transport._address_to_presence[partner_address] = UserPresence.ONLINE

    gevent.sleep(retry_interval)

    # Retrier now should have sent the message again
    assert transport._send_raw.call_count == 2

    transport.stop()
    transport.get()
def test_send_queued_messages(  # pylint: disable=unused-argument
        raiden_network, deposit, token_addresses, network_wait):
    """Test re-sending of undelivered messages on node restart"""
    app0, app1 = raiden_network
    token_address = token_addresses[0]
    chain_state = views.state_from_app(app0)
    token_network_registry_address = app0.raiden.default_registry.address
    token_network_address = views.get_token_network_address_by_token_address(
        chain_state, token_network_registry_address, token_address)
    assert token_network_address

    number_of_transfers = 7
    amount_per_transfer = 1
    total_transferred_amount = TokenAmount(amount_per_transfer * number_of_transfers)

    # Make sure none of the transfers will be sent before the restart
    transfers = []
    for secret_seed in range(number_of_transfers):
        secret = make_secret(secret_seed)
        secrethash = sha256_secrethash(secret)
        transfers.append((create_default_identifier(), amount_per_transfer, secret, secrethash))

        # Hold the SendLockedTransfer for this specific secrethash so the
        # transfer stays pending until after the restart.
        app0.raiden.raiden_event_handler.hold(
            SendLockedTransfer, {"transfer": {
                "lock": {
                    "secrethash": secrethash
                }
            }})

    for identifier, amount, secret, _ in transfers:
        app0.raiden.mediated_transfer_async(
            token_network_address=token_network_address,
            amount=amount,
            target=app1.raiden.address,
            identifier=identifier,
            secret=secret,
        )
    app0.stop()

    # Restart the app. The pending transfers must be processed.
    new_transport = MatrixTransport(app0.raiden.config["transport"]["matrix"])
    raiden_event_handler = RaidenEventHandler()
    message_handler = MessageHandler()
    app0_restart = App(
        config=app0.config,
        rpc_client=app0.raiden.rpc_client,
        proxy_manager=app0.raiden.proxy_manager,
        query_start_block=BlockNumber(0),
        default_registry=app0.raiden.default_registry,
        default_secret_registry=app0.raiden.default_secret_registry,
        default_service_registry=app0.raiden.default_service_registry,
        default_one_to_n_address=app0.raiden.default_one_to_n_address,
        default_msc_address=app0.raiden.default_msc_address,
        transport=new_transport,
        raiden_event_handler=raiden_event_handler,
        message_handler=message_handler,
        routing_mode=RoutingMode.PRIVATE,
    )
    del app0
    app0_restart.start()

    # XXX: There is no synchronization among the app and the test, so it is
    # possible between `start` and the check bellow that some of the transfers
    # have completed, making it flaky.
    #
    # Make sure the transfers are in the queue and fail otherwise.
    # chain_state = views.state_from_raiden(app0_restart.raiden)
    # for _, _, _, secrethash in transfers:
    #     msg = "The secrethashes of the pending transfers must be in the queue after a restart."
    #     assert secrethash in chain_state.payment_mapping.secrethashes_to_task, msg

    # Both balance updates are bounded by an explicit 20s timeout so a stuck
    # transfer fails the test with a descriptive error instead of hanging.
    with watch_for_unlock_failures(*raiden_network):
        exception = RuntimeError(
            "Timeout while waiting for balance update for app0")
        with gevent.Timeout(20, exception=exception):
            waiting.wait_for_payment_balance(
                raiden=app0_restart.raiden,
                token_network_registry_address=token_network_registry_address,
                token_address=token_address,
                partner_address=app1.raiden.address,
                target_address=app1.raiden.address,
                target_balance=total_transferred_amount,
                retry_timeout=network_wait,
            )
        exception = RuntimeError(
            "Timeout while waiting for balance update for app1")
        with gevent.Timeout(20, exception=exception):
            waiting.wait_for_payment_balance(
                raiden=app1.raiden,
                token_network_registry_address=token_network_registry_address,
                token_address=token_address,
                partner_address=app0_restart.raiden.address,
                target_address=app1.raiden.address,
                target_balance=total_transferred_amount,
                retry_timeout=network_wait,
            )

    # Final sanity check: both channel ends agree on the moved amount.
    assert_synced_channel_state(
        token_network_address,
        app0_restart,
        deposit - total_transferred_amount,
        [],
        app1,
        deposit + total_transferred_amount,
        [],
    )
    new_transport.stop()
def test_payment_statuses_are_restored(  # pylint: disable=unused-argument
        raiden_network, token_addresses, network_wait):
    """ Test that when the Raiden is restarted, the dictionary of
    `targets_to_identifiers_to_statuses` is populated before the transport is started.
    This should happen because if a client gets restarted during a transfer cycle,
    once restarted, the client will proceed with the cycle until the transfer is
    successfully sent. However, the dictionary `targets_to_identifiers_to_statuses`
    will not contain the payment identifiers that were originally registered
    when the previous client started the transfers.
    Related issue: https://github.com/raiden-network/raiden/issues/3432
    """
    app0, app1 = raiden_network

    token_address = token_addresses[0]
    chain_state = views.state_from_app(app0)
    token_network_registry_address = app0.raiden.default_registry.address
    token_network_address = views.get_token_network_address_by_token_address(
        chain_state, token_network_registry_address, token_address)

    # make a few transfers from app0 to app1
    amount = 1
    spent_amount = TokenAmount(7)

    for identifier in range(spent_amount):
        # Make sure the transfer is not completed
        secret = make_secret(identifier)
        app0.raiden.raiden_event_handler.hold(SendSecretReveal, {"secret": secret})

        # Payment identifiers are 1-based.
        identifier = identifier + 1
        payment_status = app0.raiden.mediated_transfer_async(
            token_network_address=token_network_address,
            amount=amount,
            target=app1.raiden.address,
            identifier=identifier,
            secret=secret,
        )
        assert payment_status.payment_identifier == identifier

    # Build the replacement app from app0's existing handles and a fresh
    # transport, BEFORE stopping app0.
    app0_restart = App(
        config=app0.config,
        rpc_client=app0.raiden.rpc_client,
        proxy_manager=app0.raiden.proxy_manager,
        query_start_block=BlockNumber(0),
        default_registry=app0.raiden.default_registry,
        default_secret_registry=app0.raiden.default_secret_registry,
        default_service_registry=app0.raiden.default_service_registry,
        default_one_to_n_address=app0.raiden.default_one_to_n_address,
        default_msc_address=app0.raiden.default_msc_address,
        transport=MatrixTransport(app0.raiden.config["transport"]["matrix"]),
        raiden_event_handler=RaidenEventHandler(),
        message_handler=MessageHandler(),
        routing_mode=RoutingMode.PRIVATE,
    )
    app0.stop()
    del app0  # from here on the app0_restart should be used
    # stop app1 to make sure that we don't complete the transfers before our checks
    app1.stop()
    app0_restart.start()

    # Check that the payment statuses were restored properly after restart
    for identifier in range(spent_amount):
        identifier = PaymentID(identifier + 1)
        mapping = app0_restart.raiden.targets_to_identifiers_to_statuses
        status = mapping[app1.raiden.address][identifier]
        assert status.amount == 1
        assert status.payment_identifier == identifier
        assert status.token_network_address == token_network_address

    app1.start()  # now that our checks are done start app1 again
    with watch_for_unlock_failures(*raiden_network):
        waiting.wait_for_healthy(app0_restart.raiden, app1.raiden.address, network_wait)
        # Wait until app1 has received the full amount of all held transfers.
        waiting.wait_for_payment_balance(
            raiden=app1.raiden,
            token_network_registry_address=token_network_registry_address,
            token_address=token_address,
            partner_address=app0_restart.raiden.address,
            target_address=app1.raiden.address,
            target_balance=spent_amount,
            retry_timeout=network_wait,
        )

    # Check that payments are completed after both nodes come online after restart
    for identifier in range(spent_amount):
        assert raiden_events_search_for_item(
            app0_restart.raiden,
            EventPaymentSentSuccess,
            {
                "identifier": identifier + 1,
                "amount": 1
            },
        )
def test_join_invalid_discovery(
        local_matrix_servers,
        private_rooms,
        retry_interval,
        retries_before_backoff,
):
    """_join_discovery_room tries to join on all servers on available_servers config
    If any of the servers isn't reachable by synapse, it'll return a 500 response,
    which needs to be handled, and if no discovery room is found on any of the
    available_servers, one in our current server should be created
    """
    # `available_servers` deliberately points at an unreachable host; the join
    # must survive the failed federation lookup.
    transport_config = {
        'discovery_room': 'discovery',
        'retries_before_backoff': retries_before_backoff,
        'retry_interval': retry_interval,
        'server': local_matrix_servers[0],
        'server_name': local_matrix_servers[0].netloc,
        'available_servers': ['http://invalid.server'],
        'private_rooms': private_rooms,
    }
    transport = MatrixTransport(transport_config)
    # Don't waste time retrying the doomed federation request.
    transport._client.api.retry_timeout = 0
    transport._send_raw = MagicMock()

    mock_service = MockRaidenService(None)
    transport.start(mock_service, mock_service.message_handler, None)
    transport.log = MagicMock()

    transport._join_discovery_room()
    # A discovery room must exist afterwards — created locally if necessary.
    assert isinstance(transport._discovery_room, Room)

    transport.stop()
    transport.get()