def test_recovery_blockchain_events(raiden_network, token_addresses, network_wait):
    """Stop one of two channel partners, have the counterparty close the
    channel while the node is offline, then restart the stopped node and
    assert that it catches up on the on-chain channel-close event.
    """
    app0, app1 = raiden_network
    token_address = token_addresses[0]

    # Take app0 offline before the counterparty acts; keep a fresh transport
    # around for the restarted instance.
    app0.raiden.stop()
    restart_transport = MatrixTransport(app0.raiden.config["transport"]["matrix"])

    # app1 closes the channel while app0 cannot see it happen.
    RaidenAPI(app1.raiden).channel_close(
        registry_address=app0.raiden.default_registry.address,
        token_address=token_address,
        partner_address=app0.raiden.address,
    )

    app0.stop()

    # Rebuild app0 from its persisted state, reusing the same chain proxies.
    app0_restart = App(
        config=app0.config,
        rpc_client=app0.raiden.rpc_client,
        proxy_manager=app0.raiden.proxy_manager,
        query_start_block=BlockNumber(0),
        default_registry=app0.raiden.default_registry,
        default_one_to_n_address=app0.raiden.default_one_to_n_address,
        default_secret_registry=app0.raiden.default_secret_registry,
        default_service_registry=app0.raiden.default_service_registry,
        default_msc_address=app0.raiden.default_msc_address,
        transport=restart_transport,
        raiden_event_handler=RaidenEventHandler(),
        message_handler=MessageHandler(),
        routing_mode=RoutingMode.PRIVATE,
    )
    del app0  # from here on the app0_restart should be used

    app0_restart.raiden.start()

    # wait for the nodes' healthcheck to update the network statuses
    waiting.wait_for_healthy(app0_restart.raiden, app1.raiden.address, network_wait)
    waiting.wait_for_healthy(app1.raiden, app0_restart.raiden.address, network_wait)

    # The restarted node must have recorded the close that happened while it
    # was down.
    restarted_state_changes = app0_restart.raiden.wal.storage.get_statechanges_by_range(
        RANGE_ALL_STATE_CHANGES
    )
    assert search_for_item(restarted_state_changes, ContractReceiveChannelClosed, {})
def create_apps(
    chain_id: ChainID,
    contracts_path: str,
    blockchain_services: BlockchainServices,
    token_network_registry_address: TokenNetworkRegistryAddress,
    one_to_n_address: Optional[Address],
    secret_registry_address: SecretRegistryAddress,
    service_registry_address: Optional[Address],
    user_deposit_address: Address,
    monitoring_service_contract_address: Address,
    reveal_timeout: BlockTimeout,
    settle_timeout: BlockTimeout,
    database_basedir: str,
    retry_interval: float,
    retries_before_backoff: int,
    environment_type: Environment,
    unrecoverable_error_should_crash: bool,
    local_matrix_url: Optional[ParsedURL],
    private_rooms: bool,
    global_rooms: List[str],
    routing_mode: RoutingMode,
    blockchain_query_interval: float,
    resolver_ports: List[Optional[int]],
) -> List[App]:
    """Create one App per entry in ``blockchain_services``.

    Each app gets its own database (derived from its private key), its own
    Matrix transport, and a HoldRaidenEventHandler/WaitForMessage pair so
    tests can pause and observe the event/message flow.

    ``resolver_ports`` must have at least one entry per service; a ``None``
    entry means that app runs without a resolver endpoint.

    Raises:
        ValueError: if ``local_matrix_url`` is None — this variant only
            supports the Matrix transport.
    """
    # pylint: disable=too-many-locals
    # FIX: the signature allows local_matrix_url=None, but the loop below
    # unconditionally builds MatrixTransport(config["transport"]["matrix"]),
    # and that sub-dict is only populated when a matrix URL was merged in.
    # Previously a None URL crashed with an opaque KeyError deep inside the
    # loop; fail fast with an explicit message instead.
    if local_matrix_url is None:
        raise ValueError(
            "create_apps requires a local matrix server, but local_matrix_url is None"
        )

    services = blockchain_services
    apps = []
    for idx, proxy_manager in enumerate(services):
        database_path = database_from_privatekey(base_dir=database_basedir, app_number=idx)
        assert len(resolver_ports) > idx
        resolver_port = resolver_ports[idx]

        config = {
            "chain_id": chain_id,
            "environment_type": environment_type,
            "unrecoverable_error_should_crash": unrecoverable_error_should_crash,
            "reveal_timeout": reveal_timeout,
            "settle_timeout": settle_timeout,
            "contracts_path": contracts_path,
            "database_path": database_path,
            "blockchain": {
                "confirmation_blocks": DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS,
                "query_interval": blockchain_query_interval,
            },
            "transport": {},
            "rpc": True,
            "console": False,
            "mediation_fees": MediationFeeConfig(),
        }
        # Matrix is the only supported transport here (None was rejected above).
        merge_dict(
            config,
            {
                "transport_type": "matrix",
                "transport": {
                    "matrix": {
                        "global_rooms": global_rooms,
                        "retries_before_backoff": retries_before_backoff,
                        "retry_interval": retry_interval,
                        "server": local_matrix_url,
                        "server_name": local_matrix_url.netloc,
                        "available_servers": [],
                        "private_rooms": private_rooms,
                    }
                },
            },
        )
        if resolver_port is not None:
            merge_dict(config, {"resolver_endpoint": "http://localhost:" + str(resolver_port)})

        # Per-app overrides are layered on top of the framework defaults.
        config_copy = deepcopy(App.DEFAULT_CONFIG)
        config_copy.update(config)

        registry = proxy_manager.token_network_registry(token_network_registry_address)
        secret_registry = proxy_manager.secret_registry(secret_registry_address)

        service_registry = None
        if service_registry_address:
            service_registry = proxy_manager.service_registry(service_registry_address)

        user_deposit = None
        if user_deposit_address:
            user_deposit = proxy_manager.user_deposit(user_deposit_address)

        transport = MatrixTransport(config["transport"]["matrix"])

        # Wrap the real handler so tests can hold back selected events.
        raiden_event_handler = RaidenEventHandler()
        hold_handler = HoldRaidenEventHandler(raiden_event_handler)
        message_handler = WaitForMessage()

        app = App(
            config=config_copy,
            rpc_client=proxy_manager.client,
            proxy_manager=proxy_manager,
            query_start_block=BlockNumber(0),
            default_registry=registry,
            default_one_to_n_address=one_to_n_address,
            default_secret_registry=secret_registry,
            default_service_registry=service_registry,
            default_msc_address=monitoring_service_contract_address,
            transport=transport,
            raiden_event_handler=hold_handler,
            message_handler=message_handler,
            user_deposit=user_deposit,
            routing_mode=routing_mode,
        )
        apps.append(app)

    return apps
def run_app(
    address: Address,
    keystore_path: str,
    gas_price: Callable,
    eth_rpc_endpoint: str,
    tokennetwork_registry_contract_address: Address,
    one_to_n_contract_address: Address,
    secret_registry_contract_address: Address,
    service_registry_contract_address: Address,
    endpoint_registry_contract_address: Address,
    user_deposit_contract_address: Address,
    listen_address: str,
    mapped_socket,
    max_unresponsive_time: int,
    api_address: str,
    rpc: bool,
    sync_check: bool,
    console: bool,
    password_file: TextIO,
    web_ui: bool,
    datadir: str,
    transport: str,
    matrix_server: str,
    network_id: int,
    environment_type: Environment,
    unrecoverable_error_should_crash: bool,
    pathfinding_service_address: str,
    pathfinding_max_paths: int,
    enable_monitoring: bool,
    resolver_endpoint: str,
    routing_mode: RoutingMode,
    config: Dict[str, Any],
    **kwargs: Any,  # FIXME: not used here, but still receives stuff in smoketest
):
    """Bootstrap a Raiden node (UDP or Matrix transport) and start it.

    Mutates ``config`` in place with the CLI-derived settings, verifies the
    ethereum client/account preconditions, builds the contract proxies and
    the transport, then constructs and starts the App.  Exits the process
    (``sys.exit(1)``) on fatal startup errors.

    Returns:
        The started App instance.
    """
    # pylint: disable=too-many-locals,too-many-branches,too-many-statements,unused-argument
    from raiden.app import App

    # UDP requires a NAT-mapped socket prepared by the caller.
    if transport == "udp" and not mapped_socket:
        raise RuntimeError("Missing socket")

    if datadir is None:
        datadir = os.path.join(os.path.expanduser("~"), ".raiden")

    account_manager = AccountManager(keystore_path)
    web3 = Web3(HTTPProvider(rpc_normalized_endpoint(eth_rpc_endpoint)))

    # Sanity checks before touching any state: local sqlite version, keystore
    # has accounts, the eth client is a supported version and on the right chain.
    check_sql_version()
    check_ethereum_has_accounts(account_manager)
    check_ethereum_client_is_supported(web3)
    check_ethereum_network_id(network_id, web3)

    (address, privatekey_bin) = get_account_and_private_key(account_manager, address, password_file)
    (listen_host, listen_port) = split_endpoint(listen_address)
    (api_host, api_port) = split_endpoint(api_address)

    # Fold the CLI options into the (mutated in place) config dict.
    config["transport"]["udp"]["host"] = listen_host
    config["transport"]["udp"]["port"] = listen_port
    config["console"] = console
    config["rpc"] = rpc
    config["web_ui"] = rpc and web_ui  # web UI only makes sense with the REST API on
    config["api_host"] = api_host
    config["api_port"] = api_port
    config["resolver_endpoint"] = resolver_endpoint
    if mapped_socket:
        config["socket"] = mapped_socket.socket
        config["transport"]["udp"]["external_ip"] = mapped_socket.external_ip
        config["transport"]["udp"]["external_port"] = mapped_socket.external_port
    config["transport_type"] = transport
    config["transport"]["matrix"]["server"] = matrix_server
    config["transport"]["udp"]["nat_keepalive_retries"] = DEFAULT_NAT_KEEPALIVE_RETRIES
    # Spread the allowed unresponsive window evenly over the keepalive retries.
    timeout = max_unresponsive_time / DEFAULT_NAT_KEEPALIVE_RETRIES
    config["transport"]["udp"]["nat_keepalive_timeout"] = timeout
    config["unrecoverable_error_should_crash"] = unrecoverable_error_should_crash
    config["services"]["pathfinding_max_paths"] = pathfinding_max_paths
    config["services"]["monitoring_enabled"] = enable_monitoring
    config["chain_id"] = network_id

    setup_environment(config, environment_type)

    contracts = setup_contracts_or_exit(config, network_id)

    rpc_client = JSONRPCClient(
        web3,
        privatekey_bin,
        gas_price_strategy=gas_price,
        block_num_confirmations=DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS,
        uses_infura="infura.io" in eth_rpc_endpoint,
    )

    blockchain_service = BlockChainService(
        jsonrpc_client=rpc_client, contract_manager=ContractManager(config["contracts_path"])
    )

    if sync_check:
        check_synced(blockchain_service)

    proxies = setup_proxies_or_exit(
        config=config,
        tokennetwork_registry_contract_address=tokennetwork_registry_contract_address,
        secret_registry_contract_address=secret_registry_contract_address,
        endpoint_registry_contract_address=endpoint_registry_contract_address,
        user_deposit_contract_address=user_deposit_contract_address,
        service_registry_contract_address=service_registry_contract_address,
        blockchain_service=blockchain_service,
        contracts=contracts,
        routing_mode=routing_mode,
        pathfinding_service_address=pathfinding_service_address,
    )

    # DB path is namespaced by node address, chain id and registry so one
    # datadir can serve multiple accounts/networks side by side.
    database_path = os.path.join(
        datadir,
        f"node_{pex(address)}",
        f"netid_{network_id}",
        f"network_{pex(proxies.token_network_registry.address)}",
        f"v{RAIDEN_DB_VERSION}_log.db",
    )
    config["database_path"] = database_path
    print(
        "\nYou are connected to the '{}' network and the DB path is: {}".format(
            ID_TO_NETWORKNAME.get(network_id, network_id), database_path
        )
    )

    # NOTE: `transport` is rebound from the transport *name* to the transport
    # *instance* below.
    discovery = None
    if transport == "udp":
        transport, discovery = setup_udp_or_exit(
            config, blockchain_service, address, contracts, endpoint_registry_contract_address
        )
    elif transport == "matrix":
        transport = _setup_matrix(config)
    else:
        raise RuntimeError(f'Unknown transport type "{transport}" given')

    raiden_event_handler = RaidenEventHandler()
    message_handler = MessageHandler()

    try:
        # Start scanning for events at the registry deployment block when known.
        start_block = 0
        if "TokenNetworkRegistry" in contracts:
            start_block = contracts["TokenNetworkRegistry"]["block_number"]

        raiden_app = App(
            config=config,
            chain=blockchain_service,
            query_start_block=BlockNumber(start_block),
            default_one_to_n_address=one_to_n_contract_address,
            default_registry=proxies.token_network_registry,
            default_secret_registry=proxies.secret_registry,
            default_service_registry=proxies.service_registry,
            transport=transport,
            raiden_event_handler=raiden_event_handler,
            message_handler=message_handler,
            discovery=discovery,
            user_deposit=proxies.user_deposit,
        )
    except RaidenError as e:
        click.secho(f"FATAL: {e}", fg="red")
        sys.exit(1)

    try:
        raiden_app.start()
    except RuntimeError as e:
        click.secho(f"FATAL: {e}", fg="red")
        sys.exit(1)
    except filelock.Timeout:
        # Another process holds the DB lock for this account/network.
        name_or_id = ID_TO_NETWORKNAME.get(network_id, network_id)
        click.secho(
            f"FATAL: Another Raiden instance already running for account "
            f"{to_normalized_address(address)} on network id {name_or_id}",
            fg="red",
        )
        sys.exit(1)

    return raiden_app
def test_recovery_unhappy_case(
    raiden_network, number_of_nodes, deposit, token_addresses, network_wait, retry_timeout
):
    """Restart a node after its channel was closed *and settled* while it was
    offline, and assert the restarted node replays the settlement from chain.
    """
    app0, app1, app2 = raiden_network
    token_address = token_addresses[0]
    chain_state = views.state_from_app(app0)
    token_network_registry_address = app0.raiden.default_registry.address
    token_network_address = views.get_token_network_address_by_token_address(
        chain_state, token_network_registry_address, token_address
    )

    # make a few transfers from app0 to app2
    amount = TokenAmount(1)
    spent_amount = deposit - 2
    for identifier in range(spent_amount):
        transfer(
            initiator_app=app0,
            target_app=app2,
            token_address=token_address,
            amount=amount,
            identifier=PaymentID(identifier),
            timeout=network_wait * number_of_nodes,
        )

    # Take app0 offline; prepare a fresh transport for the restart.
    app0.raiden.stop()
    new_transport = MatrixTransport(app0.raiden.config["transport"]["matrix"])
    app0.stop()

    # app1 closes the channel and we wait until it is settled on-chain,
    # all while app0 is down.
    RaidenAPI(app1.raiden).channel_close(
        app1.raiden.default_registry.address, token_address, app0.raiden.address
    )

    channel01 = views.get_channelstate_for(
        views.state_from_app(app1),
        app1.raiden.default_registry.address,
        token_address,
        app0.raiden.address,
    )

    waiting.wait_for_settle(
        app1.raiden,
        app1.raiden.default_registry.address,
        token_address,
        [channel01.identifier],
        retry_timeout,
    )

    raiden_event_handler = RaidenEventHandler()
    message_handler = MessageHandler()

    # Rebuild app0 from its persisted state, reusing the same chain proxies.
    app0_restart = App(
        config=app0.config,
        rpc_client=app0.raiden.rpc_client,
        proxy_manager=app0.raiden.proxy_manager,
        query_start_block=BlockNumber(0),
        default_registry=app0.raiden.default_registry,
        default_one_to_n_address=app0.raiden.default_one_to_n_address,
        default_secret_registry=app0.raiden.default_secret_registry,
        default_service_registry=app0.raiden.default_service_registry,
        default_msc_address=app0.raiden.default_msc_address,
        transport=new_transport,
        raiden_event_handler=raiden_event_handler,
        message_handler=message_handler,
        routing_mode=RoutingMode.PRIVATE,
    )
    del app0  # from here on the app0_restart should be used
    app0_restart.start()

    # The restarted node must have seen the settlement it missed while down.
    state_changes = app0_restart.raiden.wal.storage.get_statechanges_by_range(
        RANGE_ALL_STATE_CHANGES
    )

    assert search_for_item(
        state_changes,
        ContractReceiveChannelSettled,
        {
            "token_network_address": token_network_address,
            "channel_identifier": channel01.identifier,
        },
    )
def create_apps(
    chain_id,
    contracts_path,
    blockchain_services,
    endpoint_discovery_services,
    token_network_registry_address,
    one_to_n_address: Optional[Address],
    secret_registry_address,
    service_registry_address,
    user_deposit_address,
    raiden_udp_ports,
    reveal_timeout,
    settle_timeout,
    database_basedir,
    retry_interval,
    retries_before_backoff,
    throttle_capacity,
    throttle_fill_rate,
    nat_invitation_timeout,
    nat_keepalive_retries,
    nat_keepalive_timeout,
    environment_type,
    unrecoverable_error_should_crash,
    local_matrix_url=None,
    private_rooms=None,
    global_rooms=None,
):
    """Create one App per blockchain service, using Matrix when a
    ``local_matrix_url`` is given and UDP (with endpoint discovery) otherwise.

    Each app gets its own database, its own transport, and a
    HoldRaidenEventHandler/WaitForMessage pair so tests can pause and observe
    the event/message flow.
    """
    # pylint: disable=too-many-locals
    # One (blockchain, discovery, udp-port) triple per app.
    services = zip(blockchain_services, endpoint_discovery_services, raiden_udp_ports)

    apps = []
    for idx, (blockchain, discovery, port) in enumerate(services):
        address = blockchain.client.address

        host = "127.0.0.1"

        database_path = database_from_privatekey(base_dir=database_basedir, app_number=idx)

        config = {
            "chain_id": chain_id,
            "environment_type": environment_type,
            "unrecoverable_error_should_crash": unrecoverable_error_should_crash,
            "reveal_timeout": reveal_timeout,
            "settle_timeout": settle_timeout,
            "contracts_path": contracts_path,
            "database_path": database_path,
            "blockchain": {"confirmation_blocks": DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS},
            "transport": {
                "udp": {
                    "external_ip": host,
                    "external_port": port,
                    "host": host,
                    "nat_invitation_timeout": nat_invitation_timeout,
                    "nat_keepalive_retries": nat_keepalive_retries,
                    "nat_keepalive_timeout": nat_keepalive_timeout,
                    "port": port,
                    "retries_before_backoff": retries_before_backoff,
                    "retry_interval": retry_interval,
                    "throttle_capacity": throttle_capacity,
                    "throttle_fill_rate": throttle_fill_rate,
                }
            },
            "rpc": True,
            "console": False,
        }

        use_matrix = local_matrix_url is not None
        if use_matrix:
            # Layer the matrix settings on top of the UDP defaults.
            merge_dict(
                config,
                {
                    "transport_type": "matrix",
                    "transport": {
                        "matrix": {
                            "global_rooms": global_rooms,
                            "retries_before_backoff": retries_before_backoff,
                            "retry_interval": retry_interval,
                            "server": local_matrix_url,
                            "server_name": local_matrix_url.netloc,
                            "available_servers": [],
                            "private_rooms": private_rooms,
                        }
                    },
                },
            )

        # Per-app overrides are layered on top of the framework defaults.
        config_copy = deepcopy(App.DEFAULT_CONFIG)
        config_copy.update(config)

        registry = blockchain.token_network_registry(token_network_registry_address)
        secret_registry = blockchain.secret_registry(secret_registry_address)

        service_registry = None
        if service_registry_address:
            service_registry = blockchain.service_registry(service_registry_address)

        user_deposit = None
        if user_deposit_address:
            user_deposit = blockchain.user_deposit(user_deposit_address)

        if use_matrix:
            transport = MatrixTransport(config["transport"]["matrix"])
        else:
            # UDP transport with a token-bucket rate limiter.
            throttle_policy = TokenBucket(
                config["transport"]["udp"]["throttle_capacity"],
                config["transport"]["udp"]["throttle_fill_rate"],
            )

            transport = UDPTransport(
                address,
                discovery,
                server._udp_socket((host, port)),  # pylint: disable=protected-access
                throttle_policy,
                config["transport"]["udp"],
            )

        # Wrap the real handler so tests can hold back selected events.
        raiden_event_handler = RaidenEventHandler()
        hold_handler = HoldRaidenEventHandler(raiden_event_handler)
        message_handler = WaitForMessage()

        app = App(
            config=config_copy,
            chain=blockchain,
            query_start_block=BlockNumber(0),
            default_registry=registry,
            default_one_to_n_address=one_to_n_address,
            default_secret_registry=secret_registry,
            default_service_registry=service_registry,
            transport=transport,
            raiden_event_handler=hold_handler,
            message_handler=message_handler,
            discovery=discovery,
            user_deposit=user_deposit,
        )
        apps.append(app)

    return apps
def run_test_payment_statuses_are_restored(raiden_network, token_addresses, network_wait):
    """Start several transfers that are held back mid-cycle, restart the
    initiator, and assert its payment-status bookkeeping
    (``targets_to_identifiers_to_statuses``) is restored from the WAL before
    the transfers complete.
    """
    app0, app1 = raiden_network
    token_address = token_addresses[0]
    chain_state = views.state_from_app(app0)
    payment_network_id = app0.raiden.default_registry.address
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        chain_state, payment_network_id, token_address
    )

    # Hold all SendSecretReveal events so none of the transfers can finish.
    # NOTE(review): this assigns a *new* `event_handler` attribute on the App
    # wrapper; the sibling test below holds via
    # `app0.raiden.raiden_event_handler` instead — confirm this hold actually
    # reaches the running raiden service.
    raiden_event_handler = RaidenEventHandler()
    app0.event_handler = HoldRaidenEventHandler(raiden_event_handler)
    app0.event_handler.hold(SendSecretReveal, {})

    # make a few transfers from app0 to app1
    amount = 1
    spent_amount = 7
    for identifier in range(spent_amount):
        # Payment identifiers are 1-based.
        identifier = identifier + 1
        payment_status = app0.raiden.mediated_transfer_async(
            token_network_identifier=token_network_identifier,
            amount=amount,
            target=app1.raiden.address,
            identifier=identifier,
        )
        assert payment_status.payment_identifier == identifier

    raiden_event_handler = RaidenEventHandler()
    message_handler = MessageHandler()

    # Rebuild app0 from its persisted state (same chain and discovery).
    app0_restart = App(
        config=app0.config,
        chain=app0.raiden.chain,
        query_start_block=BlockNumber(0),
        default_registry=app0.raiden.default_registry,
        default_secret_registry=app0.raiden.default_secret_registry,
        default_service_registry=app0.raiden.default_service_registry,
        default_one_to_n_address=app0.raiden.default_one_to_n_address,
        transport=MatrixTransport(app0.raiden.config["transport"]["matrix"]),
        raiden_event_handler=raiden_event_handler,
        message_handler=message_handler,
        discovery=app0.raiden.discovery,
    )
    app0.stop()
    del app0  # from here on the app0_restart should be used

    # stop app1 to make sure that we don't complete the transfers before our checks
    app1.stop()
    app0_restart.start()

    # Check that the payment statuses were restored properly after restart
    for identifier in range(spent_amount):
        identifier = identifier + 1
        mapping = app0_restart.raiden.targets_to_identifiers_to_statuses
        status = mapping[app1.raiden.address][identifier]
        assert status.amount == 1
        assert status.payment_identifier == identifier
        assert status.token_network_identifier == token_network_identifier

    app1.start()  # now that our checks are done start app1 again
    waiting.wait_for_healthy(app0_restart.raiden, app1.raiden.address, network_wait)
    # With both nodes online the held transfers should now complete.
    waiting.wait_for_payment_balance(
        raiden=app1.raiden,
        payment_network_id=payment_network_id,
        token_address=token_address,
        partner_address=app0_restart.raiden.address,
        target_address=app1.raiden.address,
        target_balance=spent_amount,
        retry_timeout=network_wait,
    )

    # Check that payments are completed after both nodes come online after restart
    for identifier in range(spent_amount):
        assert raiden_events_search_for_item(
            app0_restart.raiden,
            EventPaymentSentSuccess,
            {"identifier": identifier + 1, "amount": 1},
        )
def test_payment_statuses_are_restored(  # pylint: disable=unused-argument
    raiden_network, token_addresses, network_wait
):
    """Test that when the Raiden is restarted, the dictionary of
    `targets_to_identifiers_to_statuses` is populated before the transport is
    started.

    This should happen because if a client gets restarted during a transfer
    cycle, once restarted, the client will proceed with the cycle until the
    transfer is successfully sent.  However, the dictionary
    `targets_to_identifiers_to_statuses` will not contain the payment
    identifiers that were originally registered when the previous client
    started the transfers.

    Related issue: https://github.com/raiden-network/raiden/issues/3432
    """
    app0, app1 = raiden_network
    token_address = token_addresses[0]
    chain_state = views.state_from_app(app0)
    token_network_registry_address = app0.raiden.default_registry.address
    token_network_address = views.get_token_network_address_by_token_address(
        chain_state, token_network_registry_address, token_address
    )

    # make a few transfers from app0 to app1
    amount = 1
    spent_amount = TokenAmount(7)

    for identifier in range(spent_amount):
        # Make sure the transfer is not completed
        secret = make_secret(identifier)
        app0.raiden.raiden_event_handler.hold(SendSecretReveal, {"secret": secret})

        # Payment identifiers are 1-based.
        identifier = identifier + 1
        payment_status = app0.raiden.mediated_transfer_async(
            token_network_address=token_network_address,
            amount=amount,
            target=app1.raiden.address,
            identifier=identifier,
            secret=secret,
        )
        assert payment_status.payment_identifier == identifier

    # Rebuild app0 from its persisted state, reusing the same chain proxies.
    app0_restart = App(
        config=app0.config,
        rpc_client=app0.raiden.rpc_client,
        proxy_manager=app0.raiden.proxy_manager,
        query_start_block=BlockNumber(0),
        default_registry=app0.raiden.default_registry,
        default_secret_registry=app0.raiden.default_secret_registry,
        default_service_registry=app0.raiden.default_service_registry,
        default_one_to_n_address=app0.raiden.default_one_to_n_address,
        default_msc_address=app0.raiden.default_msc_address,
        transport=MatrixTransport(app0.raiden.config["transport"]["matrix"]),
        raiden_event_handler=RaidenEventHandler(),
        message_handler=MessageHandler(),
        routing_mode=RoutingMode.PRIVATE,
    )
    app0.stop()
    del app0  # from here on the app0_restart should be used

    # stop app1 to make sure that we don't complete the transfers before our checks
    app1.stop()
    app0_restart.start()

    # Check that the payment statuses were restored properly after restart
    for identifier in range(spent_amount):
        identifier = identifier + 1
        mapping = app0_restart.raiden.targets_to_identifiers_to_statuses
        status = mapping[app1.raiden.address][identifier]
        assert status.amount == 1
        assert status.payment_identifier == identifier
        assert status.token_network_address == token_network_address

    app1.start()  # now that our checks are done start app1 again
    waiting.wait_for_healthy(app0_restart.raiden, app1.raiden.address, network_wait)
    # With both nodes online the held transfers should now complete.
    waiting.wait_for_payment_balance(
        raiden=app1.raiden,
        token_network_registry_address=token_network_registry_address,
        token_address=token_address,
        partner_address=app0_restart.raiden.address,
        target_address=app1.raiden.address,
        target_balance=spent_amount,
        retry_timeout=network_wait,
    )

    # Check that payments are completed after both nodes come online after restart
    for identifier in range(spent_amount):
        assert raiden_events_search_for_item(
            app0_restart.raiden,
            EventPaymentSentSuccess,
            {"identifier": identifier + 1, "amount": 1},
        )
def run_app(
    address: Address,
    keystore_path: str,
    gas_price: Callable,
    eth_rpc_endpoint: str,
    tokennetwork_registry_contract_address: TokenNetworkRegistryAddress,
    one_to_n_contract_address: Address,
    secret_registry_contract_address: Address,
    service_registry_contract_address: Address,
    user_deposit_contract_address: Address,
    monitoring_service_contract_address: Address,
    api_address: Endpoint,
    rpc: bool,
    sync_check: bool,
    console: bool,
    password_file: TextIO,
    web_ui: bool,
    datadir: str,
    transport: str,
    matrix_server: str,
    network_id: ChainID,
    environment_type: Environment,
    unrecoverable_error_should_crash: bool,
    pathfinding_service_address: str,
    pathfinding_max_paths: int,
    enable_monitoring: bool,
    resolver_endpoint: str,
    routing_mode: RoutingMode,
    config: Dict[str, Any],
    flat_fee: Tuple[Tuple[TokenAddress, FeeAmount], ...],
    proportional_fee: Tuple[Tuple[TokenAddress, ProportionalFeeAmount], ...],
    proportional_imbalance_fee: Tuple[Tuple[TokenAddress, ProportionalFeeAmount], ...],
    blockchain_query_interval: float,
    **kwargs: Any,  # FIXME: not used here, but still receives stuff in smoketest
):
    """Bootstrap a Raiden node (Matrix transport only) and start it.

    Mutates ``config`` in place with the CLI-derived settings, verifies the
    ethereum client/account preconditions, builds the contract proxies and
    the Matrix transport, then constructs and starts the App.  Exits the
    process (``sys.exit(1)``) on fatal startup errors.

    Returns:
        The started App instance.
    """
    # pylint: disable=too-many-locals,too-many-branches,too-many-statements,unused-argument
    from raiden.app import App

    token_network_registry_deployed_at: Optional[BlockNumber]
    smart_contracts_start_at: BlockNumber

    if datadir is None:
        datadir = os.path.join(os.path.expanduser("~"), ".raiden")

    account_manager = AccountManager(keystore_path)
    web3 = Web3(HTTPProvider(rpc_normalized_endpoint(eth_rpc_endpoint)))

    # Sanity checks before touching any state: local sqlite version, keystore
    # has accounts, the eth client is a supported version and on the right chain.
    check_sql_version()
    check_ethereum_has_accounts(account_manager)
    check_ethereum_client_is_supported(web3)
    check_ethereum_network_id(network_id, web3)

    address, privatekey = get_account_and_private_key(account_manager, address, password_file)

    api_host, api_port = split_endpoint(api_address)

    if not api_port:
        api_port = Port(DEFAULT_HTTP_SERVER_PORT)

    # Translate the per-token CLI fee tuples into the mediation fee config.
    fee_config = prepare_mediation_fee_config(
        cli_token_to_flat_fee=flat_fee,
        cli_token_to_proportional_fee=proportional_fee,
        cli_token_to_proportional_imbalance_fee=proportional_imbalance_fee,
    )

    # Fold the CLI options into the (mutated in place) config dict.
    config["console"] = console
    config["rpc"] = rpc
    config["web_ui"] = rpc and web_ui  # web UI only makes sense with the REST API on
    config["api_host"] = api_host
    config["api_port"] = api_port
    config["resolver_endpoint"] = resolver_endpoint
    config["transport_type"] = transport
    config["transport"]["matrix"]["server"] = matrix_server
    config["unrecoverable_error_should_crash"] = unrecoverable_error_should_crash
    config["services"]["pathfinding_max_paths"] = pathfinding_max_paths
    config["services"]["monitoring_enabled"] = enable_monitoring
    config["chain_id"] = network_id
    config["mediation_fees"] = fee_config
    config["blockchain"]["query_interval"] = blockchain_query_interval

    setup_environment(config, environment_type)

    contracts = setup_contracts_or_exit(config, network_id)

    rpc_client = JSONRPCClient(
        web3=web3,
        privkey=privatekey,
        gas_price_strategy=gas_price,
        block_num_confirmations=DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS,
        uses_infura="infura.io" in eth_rpc_endpoint,
    )

    # Start scanning for events at the registry deployment block when known,
    # otherwise fall back to the chain's known contracts start block.
    token_network_registry_deployed_at = None
    if "TokenNetworkRegistry" in contracts:
        token_network_registry_deployed_at = BlockNumber(
            contracts["TokenNetworkRegistry"]["block_number"]
        )

    if token_network_registry_deployed_at is None:
        smart_contracts_start_at = get_smart_contracts_start_at(network_id)
    else:
        smart_contracts_start_at = token_network_registry_deployed_at

    proxy_manager = ProxyManager(
        rpc_client=rpc_client,
        contract_manager=ContractManager(config["contracts_path"]),
        metadata=ProxyManagerMetadata(
            token_network_registry_deployed_at=token_network_registry_deployed_at,
            filters_start_at=smart_contracts_start_at,
        ),
    )

    if sync_check:
        check_synced(proxy_manager)

    proxies = setup_proxies_or_exit(
        config=config,
        tokennetwork_registry_contract_address=tokennetwork_registry_contract_address,
        secret_registry_contract_address=secret_registry_contract_address,
        user_deposit_contract_address=user_deposit_contract_address,
        service_registry_contract_address=service_registry_contract_address,
        proxy_manager=proxy_manager,
        contracts=contracts,
        routing_mode=routing_mode,
        pathfinding_service_address=pathfinding_service_address,
    )

    # Guard against eth nodes that already pruned the confirmed block.
    check_ethereum_confirmed_block_is_not_pruned(
        jsonrpc_client=rpc_client,
        secret_registry=proxies.secret_registry,
        confirmation_blocks=config["blockchain"]["confirmation_blocks"],
    )

    # DB path is namespaced by node address, chain id and registry so one
    # datadir can serve multiple accounts/networks side by side.
    database_path = os.path.join(
        datadir,
        f"node_{pex(address)}",
        f"netid_{network_id}",
        f"network_{pex(proxies.token_network_registry.address)}",
        f"v{RAIDEN_DB_VERSION}_log.db",
    )
    config["database_path"] = database_path

    print(
        "\nYou are connected to the '{}' network and the DB path is: {}".format(
            ID_TO_NETWORKNAME.get(network_id, network_id), database_path
        )
    )

    if transport == "matrix":
        matrix_transport = _setup_matrix(config, routing_mode)
    else:
        raise RuntimeError(f'Unknown transport type "{transport}" given')

    event_handler: EventHandler = RaidenEventHandler()

    # User should be told how to set fees, if using default fee settings
    log.debug("Fee Settings", fee_settings=fee_config)
    has_default_fees = (
        len(fee_config.token_to_flat_fee) == 0
        and len(fee_config.token_to_proportional_fee) == 0
        and len(fee_config.token_to_proportional_imbalance_fee) == 0
    )
    if has_default_fees:
        click.secho(
            "Default fee settings are used. "
            "If you want use Raiden with mediation fees - flat, proportional and imbalance fees - "
            "see https://raiden-network.readthedocs.io/en/latest/overview_and_guide.html#firing-it-up",  # noqa: E501
            fg="yellow",
        )

    # Only send feedback when PFS is used
    if routing_mode == RoutingMode.PFS:
        event_handler = PFSFeedbackEventHandler(event_handler)

    message_handler = MessageHandler()

    try:
        raiden_app = App(
            config=config,
            rpc_client=rpc_client,
            proxy_manager=proxy_manager,
            query_start_block=smart_contracts_start_at,
            # Fall back to the deployed contract addresses when the CLI did
            # not override them.
            default_one_to_n_address=(
                one_to_n_contract_address or contracts[CONTRACT_ONE_TO_N]["address"]
            ),
            default_registry=proxies.token_network_registry,
            default_secret_registry=proxies.secret_registry,
            default_service_registry=proxies.service_registry,
            default_msc_address=(
                monitoring_service_contract_address
                or contracts[CONTRACT_MONITORING_SERVICE]["address"]
            ),
            transport=matrix_transport,
            raiden_event_handler=event_handler,
            message_handler=message_handler,
            routing_mode=routing_mode,
            user_deposit=proxies.user_deposit,
        )
    except RaidenError as e:
        click.secho(f"FATAL: {e}", fg="red")
        sys.exit(1)

    try:
        raiden_app.start()
    except RuntimeError as e:
        click.secho(f"FATAL: {e}", fg="red")
        sys.exit(1)
    except filelock.Timeout:
        # Another process holds the DB lock for this account/network.
        name_or_id = ID_TO_NETWORKNAME.get(network_id, network_id)
        click.secho(
            f"FATAL: Another Raiden instance already running for account "
            f"{to_checksum_address(address)} on network id {name_or_id}",
            fg="red",
        )
        sys.exit(1)

    return raiden_app
def test_register_secret_happy_path(secret_registry_proxy: SecretRegistry, contract_manager):
    """Register a single secret and verify the on-chain state change.

    Registration is confirmed through the SecretRevealed event and the block
    height recorded for the secret hash.  A second, never-registered secret
    serves as a negative control throughout.
    """
    known_secret = make_secret()
    known_secrethash = sha256_secrethash(known_secret)
    control_secret = make_secret()
    control_secrethash = sha256_secrethash(control_secret)

    event_filter = secret_registry_proxy.secret_registered_filter()

    # Neither hash may be known to the contract before the test acts.
    for candidate in (known_secrethash, control_secrethash):
        assert not secret_registry_proxy.is_secret_registered(
            secrethash=candidate, block_identifier="latest"
        ), "Test setup is invalid, secret must be unknown"

    proxy_manager = ProxyManager(
        rpc_client=secret_registry_proxy.client,
        contract_manager=contract_manager,
        metadata=ProxyManagerMetadata(
            token_network_registry_deployed_at=GENESIS_BLOCK_NUMBER,
            filters_start_at=GENESIS_BLOCK_NUMBER,
        ),
    )
    proxy_manager.wait_until_block(BlockNumber(STATE_PRUNING_AFTER_BLOCKS + 1))

    # A query against a pruned block must be rejected, not answered.
    with pytest.raises(NoStateForBlockIdentifier):
        secret_registry_proxy.is_secret_registered(
            secrethash=control_secrethash, block_identifier=0
        )

    secret_registry_proxy.register_secret(secret=known_secret)

    decoded_events = [
        secret_registry_proxy.proxy.decode_event(raw_log)
        for raw_log in event_filter.get_all_entries()
    ]
    revealed_event = must_have_event(
        decoded_events, {"event": "SecretRevealed", "args": {"secrethash": known_secrethash}}
    )
    msg = "SecretRegistry.register_secret returned but the SecretRevealed event was not emitted."
    assert revealed_event, msg

    # The block stored for the hash must match the block of the event.
    registration_block = secret_registry_proxy.get_secret_registration_block_by_secrethash(
        secrethash=known_secrethash, block_identifier="latest"
    )
    msg = (
        "Block height returned by the SecretRegistry.get_secret_registration_block_by_secrethash "
        "does not match the block from the SecretRevealed event."
    )
    assert revealed_event["blockNumber"] == registration_block, msg

    # The control secret was never registered, so it has no block height.
    control_block = secret_registry_proxy.get_secret_registration_block_by_secrethash(
        secrethash=control_secrethash, block_identifier="latest"
    )
    assert control_block is None, "The secret that was not registered must not change block height!"