def split_endpoint(endpoint: Endpoint) -> HostPort:
    """Split an endpoint string into a ``(host, port)`` pair.

    An optional URI scheme prefix (e.g. ``http://``) is ignored and a
    missing port defaults to ``0``.

    Raises:
        ValueError: if ``endpoint`` cannot be parsed at all.
    """
    parsed = re.match(r"(?:[a-z0-9]*:?//)?([^:/]+)(?::(\d+))?", endpoint, re.I)

    if parsed is None:
        raise ValueError("Invalid endpoint", endpoint)

    host, port = parsed.groups()
    # `port` is either a digit string or None; treat "absent" as port 0.
    return Host(host), Port(int(port or "0"))
def split_endpoint(endpoint: str) -> HostPort:
    """Split an endpoint string into a ``(host, port)`` pair.

    An optional URI scheme prefix (e.g. ``http://``) is ignored. When the
    endpoint carries no port, the port component of the result is ``None``.

    Raises:
        ValueError: if ``endpoint`` cannot be parsed at all.
    """
    parsed = re.match(r"(?:[a-z0-9]*:?//)?([^:/]+)(?::(\d+))?", endpoint, re.I)

    if parsed is None:
        raise ValueError("Invalid endpoint", endpoint)

    host, port = parsed.groups()

    if not port:
        return Host(host), None

    return Host(host), Port(int(port))
def start_apiserver(raiden_app: App, rest_api_port_number: Port) -> APIServer:
    """Start a REST API server on ``localhost`` for the given app.

    Blocks until the chosen port is actually accepting connections, then
    returns the running server.
    """
    rest_api = RestAPI(RaidenAPI(raiden_app.raiden))
    api_config = RestApiConfig(host=Host("localhost"), port=rest_api_port_number)
    api_server = APIServer(rest_api, config=api_config)

    # Flask's `url_for` needs SERVER_NAME to build absolute URLs.
    api_server.flask_app.config["SERVER_NAME"] = f"localhost:{rest_api_port_number}"

    api_server.start()
    wait_for_listening_port(rest_api_port_number)

    return api_server
def main() -> None:
    """Run the stress test: read a multi-node config file, start the nodes
    under a Janitor, and spawn the stress-test loop."""
    import argparse
    import configparser
    import re

    # Per-node sections of the config file are named ``node0``, ``node1``, ...
    NODE_SECTION_RE = re.compile("^node[0-9]+")

    parser = argparse.ArgumentParser()
    parser.add_argument("--nodes-data-dir", default=os.getcwd())
    parser.add_argument("--wait-after-first-sync", default=False, action="store_true")
    parser.add_argument("--profiler-data-directory", default=None)
    parser.add_argument("--interface", default="127.0.0.1")
    parser.add_argument("--iterations", default=5, type=int)
    parser.add_argument("config")
    args = parser.parse_args()

    # Profiling requires root privileges, fail early if they are missing.
    if args.profiler_data_directory is not None and os.geteuid() != 0:
        raise RuntimeError(
            "To enable profiling the script has to be executed with root.")

    config = configparser.ConfigParser()
    config.read(args.config)

    datadir = args.nodes_data_dir

    interface = Host(args.interface)
    port_generator = get_free_port(5000)
    retry_timeout = 1

    nodes_config: List[NodeConfig] = list()

    # The token address comes from the config file's DEFAULT section and is
    # shared by all nodes.
    token_address = config.defaults()["token-address"]
    if not is_checksum_address(token_address):
        raise ValueError(
            f"Invalid token address {token_address}, check it is checksummed.")

    # CLI arguments shared by every node; each node section extends these.
    defaults = {
        "--log-config": "raiden:DEBUG",
        "--environment-type": "development",
        "--datadir": datadir,
    }

    for section in config:
        if NODE_SECTION_RE.match(section):
            node_config = config[section]
            address = node_config["address"]

            node = defaults.copy()
            node.update({
                "--keystore-path": node_config["keystore-path"],
                "--password-file": node_config["password-file"],
                "--eth-rpc-endpoint": node_config["eth-rpc-endpoint"],
                "--network-id": node_config["network-id"],
                "--address": address,
            })

            # Pathfinding is optional per node; only forward it when set.
            pathfinding_url = node_config.get("pathfinding-service-address")
            if pathfinding_url is not None:
                node["--pathfinding-service-address"] = pathfinding_url

            # Zero out all mediation fees so transfer amounts are predictable.
            raiden_args = [
                "raiden",
                "--accept-disclaimer",
                "--log-json",
                "--disable-debug-logfile",
                "--flat-fee",
                token_address,
                "0",
                "--proportional-fee",
                token_address,
                "0",
                "--proportional-imbalance-fee",
                token_address,
                "0",
            ]
            raiden_args.extend(chain.from_iterable(node.items()))

            # The REST interface uses checksummed address. Normalize it here.
            address = to_checksum_address(address)

            nodedir = os.path.join(
                datadir, f"node_{pex(to_canonical_address(address))}")
            nodes_config.append(
                NodeConfig(raiden_args, interface, address, nodedir))

    # TODO: Determine the `capacity_lower_bound` by querying the nodes.
    capacity_lower_bound = 1130220

    profiler_data_directory = args.profiler_data_directory

    # With an explicit iteration count run a bounded loop; otherwise iterate
    # forever. (NOTE(review): `--iterations` defaults to 5, so `iterations`
    # can only be None if a caller bypasses argparse — confirm intent.)
    iterations = args.iterations
    if iterations is None:
        iteration_counter = count()
    else:
        iteration_counter = iter(range(iterations))

    # def stop_on_signal(sig=None, _frame=None):
    #     stop.set()
    # gevent.signal(signal.SIGQUIT, stop_on_signal)
    # gevent.signal(signal.SIGTERM, stop_on_signal)
    # gevent.signal(signal.SIGINT, stop_on_signal)

    # TODO: If any of the processes crashes the script should collect and
    # bundle the logs.
    #
    # Cleanup with the Janitor is not strictly necessary for the stress test,
    # since one can assume a bug happened and the state of the node is
    # inconsistent, however it is nice to have.
    with Janitor() as nursery:
        nodes_running = start_and_wait_for_all_servers(
            nursery, port_generator, nodes_config, retry_timeout)

        # `None` signals that startup failed; the Janitor handles teardown.
        if nodes_running is None:
            return

        if args.wait_after_first_sync:
            nursery.spawn_under_watch(wait_for_user_input).get()

        test_config = StressTestConfiguration(
            port_generator,
            retry_timeout,
            Amount(capacity_lower_bound),
            token_address,
            iteration_counter,
            profiler_data_directory,
        )

        nursery.spawn_under_watch(run_stress_test, nursery, nodes_running, test_config)
        nursery.wait(timeout=None)
def create_apps(
    chain_id: ChainID,
    contracts_path: Path,
    blockchain_services: BlockchainServices,
    token_network_registry_address: TokenNetworkRegistryAddress,
    one_to_n_address: Optional[OneToNAddress],
    secret_registry_address: SecretRegistryAddress,
    service_registry_address: Optional[ServiceRegistryAddress],
    user_deposit_address: Optional[UserDepositAddress],
    monitoring_service_contract_address: MonitoringServiceAddress,
    reveal_timeout: BlockTimeout,
    settle_timeout: BlockTimeout,
    database_basedir: str,
    retry_interval_initial: float,
    retry_interval_max: float,
    retries_before_backoff: int,
    environment_type: Environment,
    unrecoverable_error_should_crash: bool,
    local_matrix_url: Optional[ParsedURL],
    broadcast_rooms: List[str],
    routing_mode: RoutingMode,
    blockchain_query_interval: float,
    resolver_ports: List[Optional[int]],
    enable_rest_api: bool,
    port_generator: Iterator[Port],
    capabilities_config: CapabilitiesConfig,
) -> List[App]:
    """ Create one App per entry in ``blockchain_services``.

    Each app gets its own database directory, its own config (with a fresh
    REST API port drawn from ``port_generator``), a `TestMatrixTransport`,
    and a `HoldRaidenEventHandler` wrapping the default event handler so
    tests can pause/inspect events. Optional contracts (service registry,
    user deposit) are only resolved when their address is given.
    """
    # pylint: disable=too-many-locals
    services = blockchain_services

    apps = []
    for idx, proxy_manager in enumerate(services):
        database_path = database_from_privatekey(base_dir=database_basedir, app_number=idx)
        # One resolver-port slot must exist per app (entries may be None).
        assert len(resolver_ports) > idx
        resolver_port = resolver_ports[idx]

        config = RaidenConfig(
            chain_id=chain_id,
            environment_type=environment_type,
            unrecoverable_error_should_crash=unrecoverable_error_should_crash,
            reveal_timeout=reveal_timeout,
            settle_timeout=settle_timeout,
            contracts_path=contracts_path,
            database_path=database_path,
            blockchain=BlockchainConfig(
                confirmation_blocks=DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS,
                query_interval=blockchain_query_interval,
            ),
            mediation_fees=MediationFeeConfig(),
            services=ServiceConfig(monitoring_enabled=False),
            rest_api=RestApiConfig(
                rest_api_enabled=enable_rest_api,
                host=Host("localhost"),
                port=next(port_generator),
            ),
            console=False,
            transport_type="matrix",
        )
        config.transport.capabilities_config = capabilities_config
        # A local matrix server (if given) replaces the default transport
        # config entirely, so the capabilities must be passed again here.
        if local_matrix_url is not None:
            config.transport = MatrixTransportConfig(
                broadcast_rooms=broadcast_rooms,
                retries_before_backoff=retries_before_backoff,
                retry_interval_initial=retry_interval_initial,
                retry_interval_max=retry_interval_max,
                server=local_matrix_url,
                available_servers=[],
                capabilities_config=capabilities_config,
            )
        assert config.transport.capabilities_config is not None

        if resolver_port is not None:
            config.resolver_endpoint = f"http://localhost:{resolver_port}"

        registry = proxy_manager.token_network_registry(
            token_network_registry_address, block_identifier=BLOCK_ID_LATEST)
        secret_registry = proxy_manager.secret_registry(
            secret_registry_address, block_identifier=BLOCK_ID_LATEST)

        # Optional contracts: only instantiate proxies when an address is set.
        service_registry = None
        if service_registry_address:
            service_registry = proxy_manager.service_registry(
                service_registry_address, block_identifier=BLOCK_ID_LATEST)

        user_deposit = None
        if user_deposit_address:
            user_deposit = proxy_manager.user_deposit(
                user_deposit_address, block_identifier=BLOCK_ID_LATEST)

        # Use `TestMatrixTransport` that saves sent messages for assertions in tests
        assert config.transport.capabilities_config is not None
        transport = TestMatrixTransport(config=config.transport, environment=environment_type)

        raiden_event_handler = RaidenEventHandler()
        hold_handler = HoldRaidenEventHandler(raiden_event_handler)
        message_handler = WaitForMessage()

        # NOTE(review): the placeholder endpoint "bla" suggests the API
        # server does not use it in this test setup — confirm.
        api_server = None
        if enable_rest_api:
            api_server = start_api_server(
                rpc_client=proxy_manager.client, config=config.rest_api, eth_rpc_endpoint="bla")

        app = App(
            config=config,
            rpc_client=proxy_manager.client,
            proxy_manager=proxy_manager,
            query_start_block=BlockNumber(0),
            default_registry=registry,
            default_secret_registry=secret_registry,
            default_service_registry=service_registry,
            default_user_deposit=user_deposit,
            default_one_to_n_address=one_to_n_address,
            default_msc_address=monitoring_service_contract_address,
            transport=transport,
            raiden_event_handler=hold_handler,
            message_handler=message_handler,
            routing_mode=routing_mode,
            api_server=api_server,
        )
        apps.append(app)

    return apps