def _run_smoketest(): print_step('Starting Raiden') config = deepcopy(App.DEFAULT_CONFIG) if args.get('extra_config', dict()): merge_dict(config, args['extra_config']) del args['extra_config'] args['config'] = config # invoke the raiden app app = run_app(**args) raiden_api = RaidenAPI(app.raiden) rest_api = RestAPI(raiden_api) api_server = APIServer(rest_api) (api_host, api_port) = split_endpoint(args['api_address']) api_server.start(api_host, api_port) raiden_api.channel_open( registry_address=contract_addresses[ CONTRACT_TOKEN_NETWORK_REGISTRY], token_address=to_canonical_address(token.contract.address), partner_address=to_canonical_address(TEST_PARTNER_ADDRESS), ) raiden_api.set_total_channel_deposit( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], to_canonical_address(token.contract.address), to_canonical_address(TEST_PARTNER_ADDRESS), TEST_DEPOSIT_AMOUNT, ) token_addresses = [to_checksum_address(token.contract.address)] success = False try: print_step('Running smoketest') error = run_smoketests( app.raiden, args['transport'], token_addresses, contract_addresses[CONTRACT_ENDPOINT_REGISTRY], debug=debug, ) if error is not None: append_report('Smoketest assertion error', error) else: success = True finally: app.stop() node = ethereum[0] node.send_signal(2) err, out = node.communicate() append_report('Ethereum stdout', out) append_report('Ethereum stderr', err) if success: print_step(f'Smoketest successful') else: print_step(f'Smoketest had errors', error=True) return success
def run_test_set_deposit_limit_crash( raiden_network, token_amount, contract_manager, retry_timeout, ): app1 = raiden_network[0] registry_address = app1.raiden.default_registry.address token_address = deploy_contract_web3( contract_name=CONTRACT_HUMAN_STANDARD_TOKEN, deploy_client=app1.raiden.chain.client, contract_manager=contract_manager, constructor_arguments=( token_amount, 2, 'raiden', 'Rd', ), ) api1 = RaidenAPI(app1.raiden) assert token_address not in api1.get_tokens_list(registry_address) api1.token_network_register( registry_address=registry_address, token_address=token_address, channel_participant_deposit_limit=RED_EYES_PER_CHANNEL_PARTICIPANT_LIMIT, token_network_deposit_limit=RED_EYES_PER_TOKEN_NETWORK_LIMIT, ) exception = RuntimeError('Did not see the token registration within 30 seconds') with gevent.Timeout(seconds=30, exception=exception): wait_for_state_change( app1.raiden, ContractReceiveNewTokenNetwork, { 'token_network': { 'token_address': token_address, }, }, retry_timeout, ) assert token_address in api1.get_tokens_list(registry_address) partner_address = make_address() api1.channel_open( registry_address=app1.raiden.default_registry.address, token_address=token_address, partner_address=partner_address, ) with pytest.raises(DepositOverLimit): api1.set_total_channel_deposit( registry_address=app1.raiden.default_registry.address, token_address=token_address, partner_address=partner_address, total_deposit=10000000000000000000000, )
def test_set_deposit_limit_crash(raiden_network, token_amount, contract_manager, retry_timeout):
    """The development contracts as of 10/12/2018 were crashing if a deposit
    larger than the configured deposit limit was requested.

    Regression test for https://github.com/raiden-network/raiden/issues/3135
    """
    app1 = raiden_network[0]
    registry_address = app1.raiden.default_registry.address

    token_address = deploy_contract_web3(
        contract_name=CONTRACT_HUMAN_STANDARD_TOKEN,
        deploy_client=app1.raiden.chain.client,
        contract_manager=contract_manager,
        constructor_arguments=(token_amount, 2, 'raiden', 'Rd'),
    )

    api1 = RaidenAPI(app1.raiden)
    assert token_address not in api1.get_tokens_list(registry_address)

    api1.token_network_register(registry_address, token_address)
    exception = RuntimeError('Did not see the token registration within 30 seconds')
    with gevent.Timeout(seconds=30, exception=exception):
        wait_for_state_change(
            app1.raiden,
            ContractReceiveNewTokenNetwork,
            {'token_network': {'token_address': token_address}},
            retry_timeout,
        )
    assert token_address in api1.get_tokens_list(registry_address)

    partner_address = make_address()
    api1.channel_open(
        registry_address=app1.raiden.default_registry.address,
        token_address=token_address,
        partner_address=partner_address,
    )

    with pytest.raises(DepositOverLimit):
        api1.set_total_channel_deposit(
            registry_address=app1.raiden.default_registry.address,
            token_address=token_address,
            partner_address=partner_address,
            total_deposit=10000000000000000000000,
        )
def test_pfs_send_capacity_updates_on_deposit_and_withdraw( raiden_network: List[App], token_addresses: List[TokenAddress]) -> None: # We need to test if PFSCapacityUpdates and PFSFeeUpdates are being # sent after a deposit and withdraw. # Therefore, we create two Raiden nodes app0 and app1. # The nodes open a channel but do not deposit # a pfs matrix room is mocked to see what is sent to it app0, app1 = raiden_network transport0 = app0.raiden.transport pfs_room_name = make_room_alias(transport0.chain_id, PATH_FINDING_BROADCASTING_ROOM) pfs_room = transport0._broadcast_rooms.get(pfs_room_name) # need to assert for mypy that pfs_room is not None assert isinstance(pfs_room, Room) pfs_room.send_text = MagicMock(spec=pfs_room.send_text) api0 = RaidenAPI(app0.raiden) api0.channel_open( token_address=token_addresses[0], registry_address=app0.raiden.default_registry.address, partner_address=app1.raiden.address, ) # the room should not have been called at channel opening assert pfs_room.send_text.call_count == 0 api0.set_total_channel_deposit( token_address=token_addresses[0], registry_address=app0.raiden.default_registry.address, partner_address=app1.raiden.address, total_deposit=TokenAmount(10), ) # now we expect the room to be called the 1st time with a PFSCapacityUpdate # and a PFSFeeUpdate after the deposit assert "PFSCapacityUpdate" in str(pfs_room.send_text.call_args_list[0]) assert "PFSFeeUpdate" in str(pfs_room.send_text.call_args_list[0]) api0.set_total_channel_withdraw( token_address=token_addresses[0], registry_address=app0.raiden.default_registry.address, partner_address=app1.raiden.address, total_withdraw=WithdrawAmount(5), ) # now we expect the room to be called the 2nd time with a PFSCapacityUpdate # after the withdraw assert "PFSCapacityUpdate" in str(pfs_room.send_text.call_args_list[1]) assert "PFSFeeUpdate" in str(pfs_room.send_text.call_args_list[1])
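
# The test above relies on MagicMock(spec=...) to capture what would have been
# broadcast to the PFS matrix room. A minimal, self-contained sketch of that
# mocking pattern; the Room class below is a stand-in for illustration, not the
# matrix client's Room.
from unittest.mock import MagicMock


class Room:  # stand-in, only provides the method we want to record
    def send_text(self, text: str) -> None:
        ...


room = Room()
# Replacing the bound method with a spec'd MagicMock records every call, so the
# broadcast payloads can be inspected afterwards via call_count / call_args_list.
room.send_text = MagicMock(spec=room.send_text)

room.send_text('{"type": "PFSCapacityUpdate"}')
assert room.send_text.call_count == 1
assert "PFSCapacityUpdate" in str(room.send_text.call_args_list[0])
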
def test_token_addresses(raiden_network, token_addresses):
    """
    Test that opening a channel via the API provides the confirmed block and not
    the latest block. The discrepancy there led to potential timing issues where
    the token network was deployed for the state in the "latest" block but not
    yet in the confirmed state, and a BadFunctionCallOutput exception was thrown
    from web3.

    Regression test for 4470
    """
    app0, app1 = raiden_network
    token_address = token_addresses[0]

    # Find the block where the token network was deployed
    token_network_address = views.get_token_network_address_by_token_address(
        views.state_from_app(app0), app0.raiden.default_registry.address, token_address
    )
    last_number = app0.raiden.rpc_client.block_number()

    for block_number in range(last_number, 0, -1):
        code = app0.raiden.rpc_client.web3.eth.getCode(
            to_checksum_address(token_network_address), block_number
        )
        if code == b"":
            break
    token_network_deploy_block_number = block_number + 1

    api0 = RaidenAPI(app0.raiden)
    # Emulate the confirmed block being a block where TokenNetwork for token_address
    # has not been deployed.
    views.state_from_raiden(app0.raiden).block_hash = app0.raiden.rpc_client.get_block(
        token_network_deploy_block_number - 1
    )["hash"]
    msg = (
        "Opening a channel with a confirmed block where the token network "
        "has not yet been deployed should raise a TokenNotRegistered error"
    )
    with pytest.raises(TokenNotRegistered):
        api0.channel_open(
            registry_address=app0.raiden.default_registry.address,
            token_address=token_address,
            partner_address=app1.raiden.address,
        )
        pytest.fail(msg)
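
# The test above scans backwards with eth.getCode to find the block in which a
# contract was deployed. A generic sketch of that scan against a standard
# web3.py client; the helper name and arguments are illustrative, not part of
# the Raiden code base.
from eth_utils import to_checksum_address
from web3 import Web3


def find_deployment_block(web3: Web3, contract_address: bytes, latest_block: int) -> int:
    """Return the first block at which `contract_address` has code deployed."""
    block_number = latest_block
    while block_number > 0:
        code = web3.eth.getCode(to_checksum_address(contract_address), block_number)
        if code == b"":
            break
        block_number -= 1
    # `block_number` is now the newest block without code, so the contract was
    # deployed in the following block.
    return block_number + 1
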
def test_transfer_after_connect_works(raiden_network, token_addresses): """ Test that payments work after joining a channel. This makes sure that the connection manager does not leave any partners in a half-healthchecked state that causes problems during payments. Test for https://github.com/raiden-network/raiden/issues/5918 """ registry_address = raiden_network[0].raiden.default_registry.address token_address = token_addresses[0] app0, app1, app2 = raiden_network api0 = RaidenAPI(app0.raiden) api1 = RaidenAPI(app1.raiden) # Open channel between node0 and node2 to not run into the bootstrapping # case when joining the token network api0.channel_open(registry_address, token_address, app2.raiden.address) # Make sure that app1 processed the block where channel open # happened. Otherwise the test becomes flaky because it does not see # potential participants in the network current_block = app0.raiden.get_block_number() wait_for_block(app1.raiden, current_block, 1) api1.token_network_connect( registry_address=registry_address, token_address=token_address, funds=TokenAmount(100), initial_channel_target=2, ) payment_result = api1.transfer_and_wait( registry_address=registry_address, token_address=token_address, amount=PaymentAmount(1), target=app0.raiden.address, ).payment_done.get() assert isinstance(payment_result, EventPaymentSentSuccess)
def test_blockchain_event_processed_interleaved( raiden_network: List[App], token_addresses: List[TokenAddress], restart_node: RestartNode, ): """ Blockchain events must be transformed into state changes and processed by the state machine interleaved. Otherwise problems arise when the creation of the state change is dependent on the state of the state machine. Regression test for: https://github.com/raiden-network/raiden/issues/6444 """ app0, app1 = raiden_network app1.stop() api0 = RaidenAPI(app0.raiden) channel_id = api0.channel_open( registry_address=app0.raiden.default_registry.address, token_address=token_addresses[0], partner_address=app1.raiden.address, ) api0.channel_close( registry_address=app0.raiden.default_registry.address, token_address=token_addresses[0], partner_address=app1.raiden.address, ) # Restart node 1 restart_node(app1) wait_all_apps(raiden_network) # Check correct events assert app1.raiden.wal, "app1.wal not set" app1_state_changes = app1.raiden.wal.storage.get_statechanges_by_range(RANGE_ALL_STATE_CHANGES) assert search_for_item( app1_state_changes, ContractReceiveChannelNew, {"channel_identifier": channel_id} ) assert search_for_item( app1_state_changes, ContractReceiveChannelClosed, {"channel_identifier": channel_id} ) assert not search_for_item( app1_state_changes, ContractReceiveRouteClosed, {"channel_identifier": channel_id} )
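
# The assertions above match recorded state changes by type and attribute
# values. A flat-attribute sketch of that kind of matcher; the real
# search_for_item helper also supports nested attribute dictionaries, which is
# omitted here for brevity.
from typing import Any, Dict, List, Optional, Type


def first_item_matching(items: List[Any], item_type: Type, attributes: Dict[str, Any]) -> Optional[Any]:
    """Return the first item of `item_type` whose attributes equal `attributes`, or None."""
    for item in items:
        if isinstance(item, item_type) and all(
            getattr(item, name, None) == value for name, value in attributes.items()
        ):
            return item
    return None
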
def test_raidenapi_channel_lifecycle(raiden_network, token_addresses, deposit, retry_timeout, settle_timeout_max): """Uses RaidenAPI to go through a complete channel lifecycle.""" node1, node2 = raiden_network token_address = token_addresses[0] token_network_address = views.get_token_network_address_by_token_address( views.state_from_app(node1), node1.raiden.default_registry.address, token_address) assert token_network_address api1 = RaidenAPI(node1.raiden) api2 = RaidenAPI(node2.raiden) registry_address = node1.raiden.default_registry.address # nodes don't have a channel, so they are not healthchecking assert api1.get_node_network_state(api2.address) == NetworkState.UNKNOWN assert api2.get_node_network_state(api1.address) == NetworkState.UNKNOWN assert not api1.get_channel_list(registry_address, token_address, api2.address) # Make sure invalid arguments to get_channel_list are caught with pytest.raises(UnknownTokenAddress): api1.get_channel_list(registry_address=registry_address, token_address=None, partner_address=api2.address) address_for_lowest_settle_timeout = make_address() lowest_valid_settle_timeout = node1.raiden.config.reveal_timeout * 2 # Make sure a small settle timeout is not accepted when opening a channel with pytest.raises(InvalidSettleTimeout): api1.channel_open( registry_address=node1.raiden.default_registry.address, token_address=token_address, partner_address=address_for_lowest_settle_timeout, settle_timeout=lowest_valid_settle_timeout - 1, ) # Make sure the smallest settle timeout is accepted api1.channel_open( registry_address=node1.raiden.default_registry.address, token_address=token_address, partner_address=address_for_lowest_settle_timeout, settle_timeout=lowest_valid_settle_timeout, ) address_for_highest_settle_timeout = make_address() highest_valid_settle_timeout = settle_timeout_max # Make sure a large settle timeout is not accepted when opening a channel with pytest.raises(InvalidSettleTimeout): api1.channel_open( registry_address=node1.raiden.default_registry.address, token_address=token_address, partner_address=address_for_highest_settle_timeout, settle_timeout=highest_valid_settle_timeout + 1, ) # Make sure the highest settle timeout is accepted api1.channel_open( registry_address=node1.raiden.default_registry.address, token_address=token_address, partner_address=address_for_highest_settle_timeout, settle_timeout=highest_valid_settle_timeout, ) # open is a synchronous api api1.channel_open(node1.raiden.default_registry.address, token_address, api2.address) channels = api1.get_channel_list(registry_address, token_address, api2.address) assert len(channels) == 1 channel12 = get_channelstate(node1, node2, token_network_address) assert channel.get_status(channel12) == ChannelState.STATE_OPENED channel_event_list1 = api1.get_blockchain_events_channel( token_address, channel12.partner_state.address) assert must_have_event( channel_event_list1, { "event": ChannelEvent.OPENED, "args": { "participant1": to_checksum_address(api1.address), "participant2": to_checksum_address(api2.address), }, }, ) network_event_list1 = api1.get_blockchain_events_token_network( token_address) assert must_have_event(network_event_list1, {"event": ChannelEvent.OPENED}) registry_address = api1.raiden.default_registry.address # Check that giving a 0 total deposit is not accepted with pytest.raises(DepositMismatch): api1.set_total_channel_deposit( registry_address=registry_address, token_address=token_address, partner_address=api2.address, total_deposit=TokenAmount(0), ) # Load the new state 
with the deposit api1.set_total_channel_deposit( registry_address=registry_address, token_address=token_address, partner_address=api2.address, total_deposit=deposit, ) # let's make sure it's idempotent. Same deposit should raise deposit mismatch limit with pytest.raises(DepositMismatch): api1.set_total_channel_deposit(registry_address, token_address, api2.address, deposit) channel12 = get_channelstate(node1, node2, token_network_address) assert channel.get_status(channel12) == ChannelState.STATE_OPENED assert channel.get_balance(channel12.our_state, channel12.partner_state) == deposit assert channel12.our_state.contract_balance == deposit assert api1.get_channel_list(registry_address, token_address, api2.address) == [channel12] # there is a channel open, they must be healthchecking each other assert api1.get_node_network_state(api2.address) == NetworkState.REACHABLE assert api2.get_node_network_state(api1.address) == NetworkState.REACHABLE event_list2 = api1.get_blockchain_events_channel( token_address, channel12.partner_state.address) assert must_have_event( event_list2, { "event": ChannelEvent.DEPOSIT, "args": { "participant": to_checksum_address(api1.address), "total_deposit": deposit }, }, ) api1.channel_close(registry_address, token_address, api2.address) # Load the new state with the channel closed channel12 = get_channelstate(node1, node2, token_network_address) event_list3 = api1.get_blockchain_events_channel( token_address, channel12.partner_state.address) assert len(event_list3) > len(event_list2) assert must_have_event( event_list3, { "event": ChannelEvent.CLOSED, "args": { "closing_participant": to_checksum_address(api1.address) }, }, ) assert channel.get_status(channel12) == ChannelState.STATE_CLOSED with pytest.raises(UnexpectedChannelState): api1.set_total_channel_deposit(registry_address, token_address, api2.address, deposit + 100) assert wait_for_state_change( node1.raiden, ContractReceiveChannelSettled, { "canonical_identifier": { "token_network_address": token_network_address, "channel_identifier": channel12.identifier, } }, retry_timeout, )
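
# Condensed view of the RaidenAPI calls exercised by the lifecycle test above.
# This is only a sketch: the waits, event assertions and error cases covered by
# the test are omitted, and the import path is assumed from the surrounding code.
from raiden.api.python import RaidenAPI


def open_fund_close(api: RaidenAPI, registry_address, token_address, partner_address, total_deposit) -> None:
    """Open a channel, deposit into it and close it again, mirroring the calls above."""
    api.channel_open(
        registry_address=registry_address,
        token_address=token_address,
        partner_address=partner_address,
    )
    api.set_total_channel_deposit(
        registry_address=registry_address,
        token_address=token_address,
        partner_address=partner_address,
        total_deposit=total_deposit,
    )
    api.channel_close(
        registry_address=registry_address,
        token_address=token_address,
        partner_address=partner_address,
    )
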
def test_deposit_amount_must_be_smaller_than_the_token_network_limit(
    raiden_network: List[App], contract_manager: ContractManager, retry_timeout: float
) -> None:
    """The Python API must properly check that the requested deposit will not
    exceed the token network deposit limit.

    This is a regression test for #3135.

    As of version `v0.18.1` (commit 786347b23), the proxy was not properly
    checking that the requested deposit amount was smaller than the smart
    contract deposit limit. This led to two errors:

    - The error message was vague and incorrect: "Deposit amount decreased"
    - The exception used was not handled and crashed the node.

    This test checks that the limit is properly checked via the Python API.
    """
    app1 = raiden_network[0]
    registry_address = app1.raiden.default_registry.address

    token_supply = 1_000_000
    token_address = TokenAddress(
        deploy_contract_web3(
            contract_name=CONTRACT_HUMAN_STANDARD_TOKEN,
            deploy_client=app1.raiden.rpc_client,
            contract_manager=contract_manager,
            constructor_arguments=(token_supply, 2, "raiden", "Rd"),
        )
    )

    # Wait until Raiden can start using the token contract: the block at which
    # the contract was deployed must be confirmed by Raiden, so wait until that
    # confirmation block has been received.
    waiting.wait_for_block(
        raiden=app1.raiden,
        block_number=BlockNumber(
            app1.raiden.get_block_number() + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS + 1
        ),
        retry_timeout=retry_timeout,
    )

    api1 = RaidenAPI(app1.raiden)

    msg = "Token is not registered yet, it must not be in the token list."
    assert token_address not in api1.get_tokens_list(registry_address), msg

    token_network_deposit_limit = TokenAmount(100)
    api1.token_network_register(
        registry_address=registry_address,
        token_address=token_address,
        channel_participant_deposit_limit=token_network_deposit_limit,
        token_network_deposit_limit=token_network_deposit_limit,
    )

    exception = RuntimeError("Did not see the token registration within 30 seconds")
    with gevent.Timeout(seconds=30, exception=exception):
        wait_for_state_change(
            app1.raiden,
            ContractReceiveNewTokenNetwork,
            {"token_network": {"token_address": token_address}},
            retry_timeout,
        )

    msg = "Token has been registered, it must be available in the token list."
    assert token_address in api1.get_tokens_list(registry_address), msg

    partner_address = make_address()
    api1.channel_open(
        registry_address=app1.raiden.default_registry.address,
        token_address=token_address,
        partner_address=partner_address,
    )

    with pytest.raises(DepositOverLimit):
        api1.set_total_channel_deposit(
            registry_address=app1.raiden.default_registry.address,
            token_address=token_address,
            partner_address=partner_address,
            total_deposit=TokenAmount(token_network_deposit_limit + 1),
        )

        pytest.fail(
            "The deposit must fail if the requested deposit exceeds the token "
            "network deposit limit."
        )
def test_participant_deposit_amount_must_be_smaller_than_the_limit(
    raiden_network: List[App], contract_manager: ContractManager, retry_timeout: float
) -> None:
    """The Python API must properly check that the requested participant deposit
    will not exceed the smart contract limit.

    This is a companion test for
    `test_deposit_amount_must_be_smaller_than_the_token_network_limit`. The
    participant deposit limit was introduced for the bug bounty with the PR
    https://github.com/raiden-network/raiden-contracts/pull/276/ ; the limit is
    available since version 0.4.0 of the smart contract.
    """
    app1 = raiden_network[0]
    registry_address = app1.raiden.default_registry.address

    token_supply = 1_000_000
    token_address = TokenAddress(
        deploy_contract_web3(
            contract_name=CONTRACT_HUMAN_STANDARD_TOKEN,
            deploy_client=app1.raiden.rpc_client,
            contract_manager=contract_manager,
            constructor_arguments=(token_supply, 2, "raiden", "Rd"),
        )
    )

    api1 = RaidenAPI(app1.raiden)

    msg = "Token is not registered yet, it must not be in the token list."
    assert token_address not in api1.get_tokens_list(registry_address), msg

    # Wait until Raiden can start using the token contract: the block at which
    # the contract was deployed must be confirmed by Raiden, so wait until that
    # confirmation block has been received.
    waiting.wait_for_block(
        raiden=app1.raiden,
        block_number=BlockNumber(
            app1.raiden.get_block_number() + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS + 1
        ),
        retry_timeout=retry_timeout,
    )

    token_network_participant_deposit_limit = TokenAmount(100)
    api1.token_network_register(
        registry_address=registry_address,
        token_address=token_address,
        channel_participant_deposit_limit=token_network_participant_deposit_limit,
        token_network_deposit_limit=TokenAmount(UINT256_MAX),
    )

    exception = RuntimeError("Did not see the token registration within 30 seconds")
    with gevent.Timeout(seconds=30, exception=exception):
        wait_for_state_change(
            app1.raiden,
            ContractReceiveNewTokenNetwork,
            {"token_network": {"token_address": token_address}},
            retry_timeout,
        )

    msg = "Token has been registered, it must be available in the token list."
    assert token_address in api1.get_tokens_list(registry_address), msg

    partner_address = make_address()
    api1.channel_open(
        registry_address=app1.raiden.default_registry.address,
        token_address=token_address,
        partner_address=partner_address,
    )

    with pytest.raises(DepositOverLimit):
        api1.set_total_channel_deposit(
            registry_address=app1.raiden.default_registry.address,
            token_address=token_address,
            partner_address=partner_address,
            total_deposit=TokenAmount(token_network_participant_deposit_limit + 1),
        )

        pytest.fail(
            "The deposit must fail if the requested deposit exceeds the participant deposit limit."
        )
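
# Illustrative sketch (not the Raiden implementation) of the two limits the
# tests above exercise: a requested total deposit must stay within the
# per-participant limit and within the whole token network limit, otherwise
# DepositOverLimit is raised. The exception class below is a local stand-in.
class DepositOverLimit(Exception):
    """Stand-in for Raiden's DepositOverLimit exception."""


def check_deposit_limits(
    total_deposit: int,
    participant_deposit_limit: int,
    token_network_deposit_limit: int,
    network_wide_deposits: int,
) -> None:
    if total_deposit > participant_deposit_limit:
        raise DepositOverLimit("Deposit exceeds the channel participant deposit limit")
    if network_wide_deposits + total_deposit > token_network_deposit_limit:
        raise DepositOverLimit("Deposit exceeds the token network deposit limit")
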
def run_smoketest( print_step: Callable, append_report: Callable, args: Dict[str, Any], contract_addresses: List[Address], token: ContractProxy, debug: bool, ethereum_nodes: List[HTTPExecutor], ): print_step('Starting Raiden') config = deepcopy(App.DEFAULT_CONFIG) extra_config = args.pop('extra_config', None) if extra_config: merge_dict(config, extra_config) args['config'] = config # Should use basic routing in the smoke test for now # TODO: If we ever utilize a PFS in the smoke test we # need to use the deployed service registry, register the # PFS service there and then change this argument. args['routing_mode'] = RoutingMode.BASIC raiden_stdout = StringIO() maybe_redirect_stdout = contextlib.redirect_stdout(raiden_stdout) if debug: maybe_redirect_stdout = contextlib.nullcontext() with maybe_redirect_stdout: success = False app = None try: app = run_app(**args) raiden_api = RaidenAPI(app.raiden) rest_api = RestAPI(raiden_api) (api_host, api_port) = split_endpoint(args['api_address']) api_server = APIServer(rest_api, config={ 'host': api_host, 'port': api_port }) api_server.start() block = app.raiden.get_block_number( ) + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS # Proxies now use the confirmed block hash to query the chain for # prerequisite checks. Wait a bit here to make sure that the confirmed # block hash contains the deployed token network or else things break wait_for_block( raiden=app.raiden, block_number=block, retry_timeout=1.0, ) raiden_api.channel_open( registry_address=contract_addresses[ CONTRACT_TOKEN_NETWORK_REGISTRY], token_address=to_canonical_address(token.contract.address), partner_address=to_canonical_address(TEST_PARTNER_ADDRESS), ) raiden_api.set_total_channel_deposit( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], to_canonical_address(token.contract.address), to_canonical_address(TEST_PARTNER_ADDRESS), TEST_DEPOSIT_AMOUNT, ) token_addresses = [to_checksum_address(token.contract.address)] print_step('Running smoketest') error = smoketest_perform_tests( app.raiden, args['transport'], token_addresses, contract_addresses[CONTRACT_ENDPOINT_REGISTRY], ) if error is not None: append_report('Smoketest assertion error', error) else: success = True except: # noqa pylint: disable=bare-except if debug: import pdb # The pylint comment is required when pdbpp is installed pdb.post_mortem() # pylint: disable=no-member else: error = traceback.format_exc() append_report('Smoketest execution error', error) finally: if app is not None: app.stop() app.raiden.get() node_executor = ethereum_nodes[0] node = node_executor.process node.send_signal(signal.SIGINT) try: node.wait(10) except TimeoutExpired: print_step('Ethereum node shutdown unclean, check log!', error=True) node.kill() if isinstance(node_executor.stdio, tuple): logfile = node_executor.stdio[1] logfile.flush() logfile.seek(0) append_report('Ethereum Node log output', logfile.read()) append_report('Raiden Node stdout', raiden_stdout.getvalue()) if success: print_step(f'Smoketest successful') else: print_step(f'Smoketest had errors', error=True) return success
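
# The smoketest above captures Raiden's stdout into a StringIO unless debug is
# enabled, in which case contextlib.nullcontext leaves stdout alone so pdb stays
# usable. A small sketch of that pattern in isolation (helper name is illustrative):
import contextlib
import io
from typing import Callable


def run_with_optional_capture(func: Callable[[], None], debug: bool) -> str:
    """Run `func`, returning its captured stdout, or an empty string when `debug` is set."""
    buffer = io.StringIO()
    redirect = contextlib.nullcontext() if debug else contextlib.redirect_stdout(buffer)
    with redirect:
        func()
    return buffer.getvalue()
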
def test_raidenapi_channel_lifecycle( raiden_network: List[RaidenService], token_addresses, deposit, retry_timeout, settle_timeout_max, ): """Uses RaidenAPI to go through a complete channel lifecycle.""" app1, app2 = raiden_network token_address = token_addresses[0] token_network_address = views.get_token_network_address_by_token_address( views.state_from_raiden(app1), app1.default_registry.address, token_address) assert token_network_address api1 = RaidenAPI(app1) api2 = RaidenAPI(app2) registry_address = app1.default_registry.address # nodes don't have a channel, so they are not healthchecking assert api1.get_node_network_state(api2.address) == NetworkState.UNKNOWN assert api2.get_node_network_state(api1.address) == NetworkState.UNKNOWN assert not api1.get_channel_list(registry_address, token_address, api2.address) # Make sure invalid arguments to get_channel_list are caught with pytest.raises(UnknownTokenAddress): api1.get_channel_list(registry_address=registry_address, token_address=None, partner_address=api2.address) address_for_lowest_settle_timeout = make_address() lowest_valid_settle_timeout = app1.config.reveal_timeout * 2 # Make sure a small settle timeout is not accepted when opening a channel with pytest.raises(InvalidSettleTimeout): api1.channel_open( registry_address=app1.default_registry.address, token_address=token_address, partner_address=address_for_lowest_settle_timeout, settle_timeout=BlockTimeout(lowest_valid_settle_timeout - 1), ) # Make sure the smallest settle timeout is accepted api1.channel_open( registry_address=app1.default_registry.address, token_address=token_address, partner_address=address_for_lowest_settle_timeout, settle_timeout=BlockTimeout(lowest_valid_settle_timeout), ) address_for_highest_settle_timeout = make_address() highest_valid_settle_timeout = settle_timeout_max # Make sure a large settle timeout is not accepted when opening a channel with pytest.raises(InvalidSettleTimeout): api1.channel_open( registry_address=app1.default_registry.address, token_address=token_address, partner_address=address_for_highest_settle_timeout, settle_timeout=highest_valid_settle_timeout + 1, ) # Make sure the highest settle timeout is accepted api1.channel_open( registry_address=app1.default_registry.address, token_address=token_address, partner_address=address_for_highest_settle_timeout, settle_timeout=highest_valid_settle_timeout, ) # open is a synchronous api api1.channel_open(app1.default_registry.address, token_address, api2.address) channels = api1.get_channel_list(registry_address, token_address, api2.address) assert len(channels) == 1 channel12 = get_channelstate(app1, app2, token_network_address) assert channel.get_status(channel12) == ChannelState.STATE_OPENED registry_address = api1.raiden.default_registry.address # Check that giving a 0 total deposit is not accepted with pytest.raises(DepositMismatch): api1.set_total_channel_deposit( registry_address=registry_address, token_address=token_address, partner_address=api2.address, total_deposit=TokenAmount(0), ) # Load the new state with the deposit api1.set_total_channel_deposit( registry_address=registry_address, token_address=token_address, partner_address=api2.address, total_deposit=deposit, ) # let's make sure it's idempotent. 
Same deposit should raise deposit mismatch limit with pytest.raises(DepositMismatch): api1.set_total_channel_deposit(registry_address, token_address, api2.address, deposit) channel12 = get_channelstate(app1, app2, token_network_address) assert channel.get_status(channel12) == ChannelState.STATE_OPENED assert channel.get_balance(channel12.our_state, channel12.partner_state) == deposit assert channel12.our_state.contract_balance == deposit assert api1.get_channel_list(registry_address, token_address, api2.address) == [channel12] # there is a channel open, they must be healthchecking each other assert api1.get_node_network_state(api2.address) == NetworkState.REACHABLE assert api2.get_node_network_state(api1.address) == NetworkState.REACHABLE api1.channel_close(registry_address, token_address, api2.address) # Load the new state with the channel closed channel12 = get_channelstate(app1, app2, token_network_address) assert channel.get_status(channel12) == ChannelState.STATE_CLOSED with pytest.raises(UnexpectedChannelState): api1.set_total_channel_deposit(registry_address, token_address, api2.address, deposit + 100) assert wait_for_state_change( app1, ContractReceiveChannelSettled, { "canonical_identifier": { "token_network_address": token_network_address, "channel_identifier": channel12.identifier, } }, retry_timeout, )
class ConnectionManager: # pragma: no unittest """The ConnectionManager provides a high level abstraction for connecting to a Token network. Note: It is initialized with 0 funds; a connection to the token network will be only established _after_ calling `connect(funds)` """ # XXX Hack: for bootstrapping, the first node on a network opens a channel # with this address to become visible. BOOTSTRAP_ADDR_HEX = to_checksum_address("2" * 40) BOOTSTRAP_ADDR = to_canonical_address(BOOTSTRAP_ADDR_HEX) def __init__(self, raiden: "RaidenService", token_network_address: TokenNetworkAddress): self.raiden = raiden chain_state = views.state_from_raiden(raiden) token_network_state = views.get_token_network_by_address( chain_state, token_network_address ) token_network_registry = views.get_token_network_registry_by_token_network_address( chain_state, token_network_address ) assert token_network_state assert token_network_registry # TODO: # - Add timeout for transaction polling, used to overwrite the RaidenAPI # defaults # - Add a proper selection strategy (#576) self.funds = 0 self.initial_channel_target = 0 self.joinable_funds_target = 0.0 self.raiden = raiden self.registry_address = token_network_registry.address self.token_network_address = token_network_address self.token_address = token_network_state.token_address self.lock = Semaphore() #: protects self.funds and self.initial_channel_target self.api = RaidenAPI(raiden) def connect( self, funds: typing.TokenAmount, initial_channel_target: int = 3, joinable_funds_target: float = 0.4, ) -> None: """Connect to the network. Subsequent calls to `connect` are allowed, but will only affect the spendable funds and the connection strategy parameters for the future. `connect` will not close any channels. Note: the ConnectionManager does not discriminate manually opened channels from automatically opened ones. If the user manually opened channels, those deposit amounts will affect the funding per channel and the number of new channels opened. Args: funds: Target amount of tokens spendable to join the network. initial_channel_target: Target number of channels to open. joinable_funds_target: Amount of funds not initially assigned. """ token = self.raiden.proxy_manager.token(self.token_address) token_balance = token.balance_of(self.raiden.address) if token_balance < funds: raise InvalidAmount( f"Insufficient balance for token {to_checksum_address(self.token_address)}" ) if funds <= 0: raise InvalidAmount("The funds to use in the connection need to be a positive integer") if joinable_funds_target < 0 or joinable_funds_target > 1: raise InvalidAmount( f"joinable_funds_target should be between 0 and 1. Given: {joinable_funds_target}" ) with self.lock: self.funds = funds self.initial_channel_target = initial_channel_target self.joinable_funds_target = joinable_funds_target log_open_channels(self.raiden, self.registry_address, self.token_address, funds) qty_network_channels = views.count_token_network_channels( views.state_from_raiden(self.raiden), self.registry_address, self.token_address ) if not qty_network_channels: log.info( "Bootstrapping token network.", node=to_checksum_address(self.raiden.address), network_id=to_checksum_address(self.registry_address), token_id=to_checksum_address(self.token_address), ) self.api.channel_open( self.registry_address, self.token_address, self.BOOTSTRAP_ADDR ) else: self._open_channels() def leave(self, registry_address: TokenNetworkRegistryAddress) -> List[NettingChannelState]: """ Leave the token network. 
This implies closing all channels and waiting for all channels to be settled. """ with self.lock: self.initial_channel_target = 0 channels_to_close = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), token_network_registry_address=registry_address, token_address=self.token_address, ) partner_addresses = [ channel_state.partner_state.address for channel_state in channels_to_close ] self.api.channel_batch_close(registry_address, self.token_address, partner_addresses) channel_ids = [channel_state.identifier for channel_state in channels_to_close] waiting.wait_for_settle( self.raiden, registry_address, self.token_address, channel_ids, self.raiden.alarm.sleep_time, ) return channels_to_close def join_channel(self, partner_address: Address, partner_deposit: TokenAmount) -> None: """Will be called, when we were selected as channel partner by another node. It will fund the channel with up to the partners deposit, but not more than remaining funds or the initial funding per channel. If the connection manager has no funds, this is a noop. """ # Consider this race condition: # # - Partner opens the channel and starts the deposit. # - This nodes learns about the new channel, starts ConnectionManager's # retry_connect, which will start a deposit for this half of the # channel. # - This node learns about the partner's deposit before its own. # join_channel is called which will try to deposit again. # # To fix this race, first the node must wait for the pending operations # to finish, because in them could be a deposit, and then deposit must # be called only if the channel is still not funded. token_network_proxy = self.raiden.proxy_manager.token_network(self.token_network_address) # Wait for any pending operation in the channel to complete, before # deciding on the deposit with self.lock, token_network_proxy.channel_operations_lock[partner_address]: channel_state = views.get_channelstate_for( chain_state=views.state_from_raiden(self.raiden), token_network_registry_address=self.registry_address, token_address=self.token_address, partner_address=partner_address, ) if not channel_state: return joining_funds = min( partner_deposit, self._funds_remaining, self._initial_funding_per_partner ) if joining_funds <= 0 or self._leaving_state: return if joining_funds <= channel_state.our_state.contract_balance: return try: self.api.set_total_channel_deposit( self.registry_address, self.token_address, partner_address, joining_funds ) except RaidenRecoverableError: log.info( "Channel not in opened state", node=to_checksum_address(self.raiden.address) ) except InvalidDBData: raise except RaidenUnrecoverableError as e: should_crash = ( self.raiden.config["environment_type"] != Environment.PRODUCTION or self.raiden.config["unrecoverable_error_should_crash"] ) if should_crash: raise log.critical(str(e), node=to_checksum_address(self.raiden.address)) else: log.info( "Joined a channel", node=to_checksum_address(self.raiden.address), partner=to_checksum_address(partner_address), funds=joining_funds, ) def retry_connect(self) -> None: """Will be called when new channels in the token network are detected. If the minimum number of channels was not yet established, it will try to open new channels. If the connection manager has no funds, this is a noop. """ with self.lock: if self._funds_remaining > 0 and not self._leaving_state: self._open_channels() def _find_new_partners(self) -> List[Address]: """ Search the token network for potential channel partners. 
""" open_channels = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), token_network_registry_address=self.registry_address, token_address=self.token_address, ) known = set(channel_state.partner_state.address for channel_state in open_channels) known.add(self.BOOTSTRAP_ADDR) known.add(self.raiden.address) participants_addresses = views.get_participants_addresses( views.state_from_raiden(self.raiden), self.registry_address, self.token_address ) available_addresses = list(participants_addresses - known) shuffle(available_addresses) new_partners = available_addresses log.debug( "Found partners", node=to_checksum_address(self.raiden.address), number_of_partners=len(available_addresses), ) return new_partners def _join_partner(self, partner: Address) -> None: """ Ensure a channel exists with partner and is funded in our side """ log.info( "Trying to join or fund channel with partner further", node=to_checksum_address(self.raiden.address), partner=to_checksum_address(partner), ) try: self.api.channel_open(self.registry_address, self.token_address, partner) except DuplicatedChannelError: # If channel already exists (either because partner created it, # or it's nonfunded channel), continue to ensure it's funded pass total_deposit = self._initial_funding_per_partner if total_deposit == 0: return try: self.api.set_total_channel_deposit( registry_address=self.registry_address, token_address=self.token_address, partner_address=partner, total_deposit=total_deposit, ) except InvalidDBData: raise except RECOVERABLE_ERRORS: log.info( "Deposit failed", node=to_checksum_address(self.raiden.address), partner=to_checksum_address(partner), ) except RaidenUnrecoverableError: should_crash = ( self.raiden.config["environment_type"] != Environment.PRODUCTION or self.raiden.config["unrecoverable_error_should_crash"] ) if should_crash: raise log.critical( "Deposit failed", node=to_checksum_address(self.raiden.address), partner=to_checksum_address(partner), ) def _open_channels(self) -> bool: """ Open channels until there are `self.initial_channel_target` channels open. Do nothing if there are enough channels open already. Note: - This method must be called with the lock held. 
Return: - False if no channels could be opened """ open_channels = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), token_network_registry_address=self.registry_address, token_address=self.token_address, ) open_channels = [ channel_state for channel_state in open_channels if channel_state.partner_state.address != self.BOOTSTRAP_ADDR ] funded_channels = [ channel_state for channel_state in open_channels if channel_state.our_state.contract_balance >= self._initial_funding_per_partner ] nonfunded_channels = [ channel_state for channel_state in open_channels if channel_state not in funded_channels ] possible_new_partners = self._find_new_partners() if possible_new_partners == 0: return False # if we already met our target, break if len(funded_channels) >= self.initial_channel_target: return False # if we didn't, but there's no nonfunded channels and no available partners # it means the network is smaller than our target, so we should also break if not nonfunded_channels and possible_new_partners == 0: return False n_to_join = self.initial_channel_target - len(funded_channels) nonfunded_partners = [ channel_state.partner_state.address for channel_state in nonfunded_channels ] # first, fund nonfunded channels, then open and fund with possible_new_partners, # until initial_channel_target of funded channels is met join_partners = (nonfunded_partners + possible_new_partners)[:n_to_join] log.debug( "Spawning greenlets to join partners", node=to_checksum_address(self.raiden.address), num_greenlets=len(join_partners), ) greenlets = set(gevent.spawn(self._join_partner, partner) for partner in join_partners) gevent.joinall(greenlets, raise_error=True) return True @property def _initial_funding_per_partner(self) -> TokenAmount: """The calculated funding per partner depending on configuration and overall funding of the ConnectionManager. Note: - This attribute must be accessed with the lock held. """ if self.initial_channel_target: return TokenAmount( int(self.funds * (1 - self.joinable_funds_target) / self.initial_channel_target) ) return TokenAmount(0) @property def _funds_remaining(self) -> TokenAmount: """The remaining funds after subtracting the already deposited amounts. Note: - This attribute must be accessed with the lock held. """ if self.funds > 0: token = self.raiden.proxy_manager.token(self.token_address) token_balance = token.balance_of(self.raiden.address) sum_deposits = views.get_our_deposits_for_token_network( views.state_from_raiden(self.raiden), self.registry_address, self.token_address ) return TokenAmount(min(self.funds - sum_deposits, token_balance)) return TokenAmount(0) @property def _leaving_state(self) -> bool: """True if the node is leaving the token network. Note: - This attribute must be accessed with the lock held. """ return self.initial_channel_target < 1 def __repr__(self) -> str: if self.raiden.wal is None: return ( f"{self.__class__.__name__}(target={self.initial_channel_target} " "WAL not initialized)" ) open_channels = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), token_network_registry_address=self.registry_address, token_address=self.token_address, ) return ( f"{self.__class__.__name__}(target={self.initial_channel_target} " + f"open_channels={len(open_channels)}:{open_channels!r})" )
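
# The funding arithmetic behind _initial_funding_per_partner above, restated as
# a pure function so the numbers can be checked by hand. The values in the
# assertion are illustrative only.
def initial_funding_per_partner(
    funds: int, joinable_funds_target: float, initial_channel_target: int
) -> int:
    """Tokens deposited per partner channel, as computed by the property above."""
    if not initial_channel_target:
        return 0
    return int(funds * (1 - joinable_funds_target) / initial_channel_target)


# With connect()'s defaults (initial_channel_target=3, joinable_funds_target=0.4)
# and funds=100, each partner channel is funded with 20 tokens.
assert initial_funding_per_partner(100, 0.4, 3) == 20
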
def _run_smoketest(): print_step('Starting Raiden') # invoke the raiden app app = run_app(**args) raiden_api = RaidenAPI(app.raiden) rest_api = RestAPI(raiden_api) api_server = APIServer(rest_api) (api_host, api_port) = split_endpoint(args['api_address']) api_server.start(api_host, api_port) raiden_api.channel_open( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], to_canonical_address(token.contract.address), to_canonical_address(TEST_PARTNER_ADDRESS), None, None, ) raiden_api.set_total_channel_deposit( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], to_canonical_address(token.contract.address), to_canonical_address(TEST_PARTNER_ADDRESS), TEST_DEPOSIT_AMOUNT, ) smoketest_config['contracts'][ 'registry_address'] = to_checksum_address( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], ) smoketest_config['contracts'][ 'secret_registry_address'] = to_checksum_address( contract_addresses[CONTRACT_SECRET_REGISTRY], ) smoketest_config['contracts'][ 'discovery_address'] = to_checksum_address( contract_addresses[CONTRACT_ENDPOINT_REGISTRY], ) smoketest_config['contracts']['token_address'] = to_checksum_address( token.contract.address, ) success = False try: print_step('Running smoketest') error = run_smoketests(app.raiden, smoketest_config, debug=debug) if error is not None: append_report('Smoketest assertion error', error) else: success = True finally: app.stop() ethereum.send_signal(2) err, out = ethereum.communicate() append_report('Ethereum init stdout', ethereum_config['init_log_out'].decode('utf-8')) append_report('Ethereum init stderr', ethereum_config['init_log_err'].decode('utf-8')) append_report('Ethereum stdout', out) append_report('Ethereum stderr', err) append_report('Smoketest configuration', json.dumps(smoketest_config)) if success: print_step( f'Smoketest successful, report was written to {report_file}') else: print_step( f'Smoketest had errors, report was written to {report_file}', error=True) return success
class ConnectionManager: """The ConnectionManager provides a high level abstraction for connecting to a Token network. Note: It is initialized with 0 funds; a connection to the token network will be only established _after_ calling `connect(funds)` """ # XXX Hack: for bootstrapping, the first node on a network opens a channel # with this address to become visible. BOOTSTRAP_ADDR_HEX = b'2' * 40 BOOTSTRAP_ADDR = unhexlify(BOOTSTRAP_ADDR_HEX) def __init__(self, raiden, token_network_identifier): chain_state = views.state_from_raiden(raiden) token_network_state = views.get_token_network_by_identifier( chain_state, token_network_identifier, ) token_network_registry = views.get_token_network_registry_by_token_network_identifier( chain_state, token_network_identifier, ) # TODO: # - Add timeout for transaction polling, used to overwrite the RaidenAPI # defaults # - Add a proper selection strategy (#576) self.funds = 0 self.initial_channel_target = 0 self.joinable_funds_target = 0 self.raiden = raiden self.registry_address = token_network_registry.address self.token_network_identifier = token_network_identifier self.token_address = token_network_state.token_address self.lock = Semaphore() #: protects self.funds and self.initial_channel_target self.api = RaidenAPI(raiden) def connect( self, funds: int, initial_channel_target: int = 3, joinable_funds_target: float = 0.4, ): """Connect to the network. Subsequent calls to `connect` are allowed, but will only affect the spendable funds and the connection strategy parameters for the future. `connect` will not close any channels. Note: the ConnectionManager does not discriminate manually opened channels from automatically opened ones. If the user manually opened channels, those deposit amounts will affect the funding per channel and the number of new channels opened. Args: funds: Target amount of tokens spendable to join the network. initial_channel_target: Target number of channels to open. joinable_funds_target: Amount of funds not initially assigned. """ if funds <= 0: raise ValueError('connecting needs a positive value for `funds`') with self.lock: self.funds = funds self.initial_channel_target = initial_channel_target self.joinable_funds_target = joinable_funds_target log_open_channels(self.raiden, self.registry_address, self.token_address, funds) qty_network_channels = views.count_token_network_channels( views.state_from_raiden(self.raiden), self.registry_address, self.token_address, ) if not qty_network_channels: log.debug('bootstrapping token network.') # make ourselves visible self.api.channel_open( self.registry_address, self.token_address, self.BOOTSTRAP_ADDR, ) else: self._open_channels() def leave_async(self, only_receiving=True): """ Async version of `leave()` """ leave_result = AsyncResult() gevent.spawn(self.leave, only_receiving).link(leave_result) return leave_result def leave(self, registry_address, only_receiving=True): """ Leave the token network. This implies closing all channels and waiting for all channels to be settled. Note: By default we're just discarding all channels for which we haven't received anything. This potentially leaves deposits locked in channels after `closing`. This is "safe" from an accounting point of view (deposits can not be lost), but may still be undesirable from a liquidity point of view (deposits will only be freed after manually closing or after the partner closed the channel). If only_receiving is False then we close and settle all channels irrespective of them having received transfers or not. 
""" with self.lock: self.initial_channel_target = 0 if only_receiving: channels_to_close = views.get_channelstate_for_receiving( views.state_from_raiden(self.raiden), registry_address, self.token_address, ) else: channels_to_close = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), payment_network_id=registry_address, token_address=self.token_address, ) partner_addresses = [ channel_state.partner_state.address for channel_state in channels_to_close ] self.api.channel_batch_close( registry_address, self.token_address, partner_addresses, ) channel_ids = [ channel_state.identifier for channel_state in channels_to_close ] waiting.wait_for_settle( self.raiden, registry_address, self.token_address, channel_ids, self.raiden.alarm.sleep_time, ) return channels_to_close def join_channel(self, partner_address, partner_deposit): """Will be called, when we were selected as channel partner by another node. It will fund the channel with up to the partners deposit, but not more than remaining funds or the initial funding per channel. If the connection manager has no funds, this is a noop. """ with self.lock: joining_funds = min( partner_deposit, self._funds_remaining, self._initial_funding_per_partner, ) if joining_funds <= 0 or self._leaving_state: return self.api.set_total_channel_deposit( self.registry_address, self.token_address, partner_address, joining_funds, ) log.debug( 'joined a channel!', funds=joining_funds, me=pex(self.raiden.address), partner=pex(partner_address), ) def retry_connect(self): """Will be called when new channels in the token network are detected. If the minimum number of channels was not yet established, it will try to open new channels. If the connection manager has no funds, this is a noop. """ with self.lock: if self._funds_remaining <= 0 or self._leaving_state: return open_channels = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), payment_network_id=self.registry_address, token_address=self.token_address, ) if len(open_channels) >= self.initial_channel_target: return self._open_channels() def find_new_partners(self, number: int): """Search the token network for potential channel partners. Args: number: number of partners to return """ open_channels = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), payment_network_id=self.registry_address, token_address=self.token_address, ) known = set(channel_state.partner_state.address for channel_state in open_channels) known.add(self.BOOTSTRAP_ADDR) known.add(self.raiden.address) participants_addresses = views.get_participants_addresses( views.state_from_raiden(self.raiden), self.registry_address, self.token_address, ) available = participants_addresses - known new_partners = list(available)[:number] log.debug('found {} partners'.format(len(available))) return new_partners def _open_channels(self): """ Open channels until there are `self.initial_channel_target` channels open. Do nothing if there are enough channels open already. Note: - This method must be called with the lock held. 
""" open_channels = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), payment_network_id=self.registry_address, token_address=self.token_address, ) qty_channels_to_open = self.initial_channel_target - len(open_channels) if qty_channels_to_open <= 0: return for partner in self.find_new_partners(qty_channels_to_open): try: self.api.channel_open( self.registry_address, self.token_address, partner, ) except DuplicatedChannelError: # This can fail because of a race condition, where the channel # partner opens first. log.info('partner opened channel first') try: self.api.set_total_channel_deposit( self.registry_address, self.token_address, partner, self._initial_funding_per_partner, ) except AddressWithoutCode: log.warn('connection manager: channel closed just after it was created') except TransactionThrew: log.exception('connection manager: deposit failed') @property def _initial_funding_per_partner(self) -> int: """The calculated funding per partner depending on configuration and overall funding of the ConnectionManager. Note: - This attribute must be accessed with the lock held. """ if self.initial_channel_target: return int( self.funds * (1 - self.joinable_funds_target) / self.initial_channel_target, ) return 0 @property def _funds_remaining(self) -> int: """The remaining funds after subtracting the already deposited amounts. Note: - This attribute must be accessed with the lock held. """ if self.funds > 0: sum_deposits = views.get_our_capacity_for_token_network( views.state_from_raiden(self.raiden), self.registry_address, self.token_address, ) remaining = self.funds - sum_deposits return remaining return 0 @property def _leaving_state(self) -> bool: """True if the node is leaving the token network. Note: - This attribute must be accessed with the lock held. """ return self.initial_channel_target < 1 def __repr__(self) -> str: open_channels = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), payment_network_id=self.registry_address, token_address=self.token_address, ) return f'{self.__class__.__name__}(target={self.initial_channel_target} ' +\ f'channels={len(open_channels)}:{open_channels!r})'
def run_smoketest( print_step: Callable, args: Dict[str, Any], contract_addresses: Dict[str, Address], token: ContractProxy, ): print_step("Starting Raiden") app = None api_server = None try: app = run_app(**args) raiden_api = RaidenAPI(app.raiden) rest_api = RestAPI(raiden_api) (api_host, api_port) = split_endpoint(args["api_address"]) api_server = APIServer(rest_api, config={"host": api_host, "port": api_port}) api_server.start() block = BlockNumber(app.raiden.get_block_number() + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS) # Proxies now use the confirmed block hash to query the chain for # prerequisite checks. Wait a bit here to make sure that the confirmed # block hash contains the deployed token network or else things break wait_for_block(raiden=app.raiden, block_number=block, retry_timeout=1.0) raiden_api.channel_open( registry_address=TokenNetworkRegistryAddress( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY] ), token_address=TokenAddress(to_canonical_address(token.contract.address)), partner_address=ConnectionManager.BOOTSTRAP_ADDR, ) raiden_api.set_total_channel_deposit( registry_address=TokenNetworkRegistryAddress( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY] ), token_address=TokenAddress(to_canonical_address(token.contract.address)), partner_address=ConnectionManager.BOOTSTRAP_ADDR, total_deposit=TEST_DEPOSIT_AMOUNT, ) token_addresses = [to_checksum_address(token.contract.address)] print_step("Running smoketest") raiden_service = app.raiden token_network_added_events = raiden_service.default_registry.filter_token_added_events() events_token_addresses = [ event["args"]["token_address"] for event in token_network_added_events ] assert events_token_addresses == token_addresses token_networks = views.get_token_identifiers( views.state_from_raiden(raiden_service), raiden_service.default_registry.address ) assert len(token_networks) == 1 channel_state = views.get_channelstate_for( chain_state=views.state_from_raiden(raiden_service), token_network_registry_address=raiden_service.default_registry.address, token_address=token_networks[0], partner_address=ConnectionManager.BOOTSTRAP_ADDR, ) assert channel_state distributable = channel.get_distributable( channel_state.our_state, channel_state.partner_state ) assert distributable == TEST_DEPOSIT_AMOUNT assert distributable == channel_state.our_state.contract_balance assert channel.get_status(channel_state) == ChannelState.STATE_OPENED port_number = raiden_service.config.api_port response = requests.get(f"http://localhost:{port_number}/api/v1/channels") assert response.status_code == HTTPStatus.OK response_json = json.loads(response.content) assert response_json[0]["partner_address"] == to_checksum_address( ConnectionManager.BOOTSTRAP_ADDR ) assert response_json[0]["state"] == "opened" assert int(response_json[0]["balance"]) > 0 finally: if api_server is not None: api_server.stop() api_server.greenlet.get() if app is not None: app.stop() app.raiden.greenlet.get()
def run( privatekey, registry_contract_address, secret_registry_contract_address, discovery_contract_address, listen_address, structlog, logfile, scenario, stage_prefix, ): # pylint: disable=unused-argument # TODO: only enabled structlog on "initiators" structlog.configure(structlog, log_file=logfile) (listen_host, listen_port) = split_endpoint(listen_address) config = App.DEFAULT_CONFIG.copy() config['host'] = listen_host config['port'] = listen_port config['privatekey_hex'] = privatekey privatekey_bin = decode_hex(privatekey) rpc_client = JSONRPCClient( '127.0.0.1', 8545, privatekey_bin, ) blockchain_service = BlockChainService(privatekey_bin, rpc_client) discovery = ContractDiscovery( blockchain_service, decode_hex(discovery_contract_address), ) registry = blockchain_service.token_network_registry( registry_contract_address, ) secret_registry = blockchain_service.secret_registry( secret_registry_contract_address, ) throttle_policy = TokenBucket( config['protocol']['throttle_capacity'], config['protocol']['throttle_fill_rate'], ) transport = UDPTransport( discovery=discovery, udpsocket=gevent.server._udp_socket((listen_host, listen_port)), throttle_policy=throttle_policy, config=config['protocol'], ) app = App( config=config, chain=blockchain_service, query_start_block=0, default_registry=registry, default_secret_registry=secret_registry, transport=transport, discovery=discovery, ) app.discovery.register( app.raiden.address, listen_host, listen_port, ) from_block = 0 app.raiden.install_all_blockchain_filters( app.raiden.default_registry, app.raiden.default_secret_registry, from_block, ) if scenario: script = json.load(scenario) tools = ConsoleTools( app.raiden, app.discovery, app.config['settle_timeout'], app.config['reveal_timeout'], ) transfers_by_peer = {} tokens = script['tokens'] token_address = None peer = None our_node = hexlify(app.raiden.address) log.warning('our address is {}'.format(our_node)) for token in tokens: # skip tokens that we're not part of nodes = token['channels'] if our_node not in nodes: continue partner_nodes = [ node for node in nodes if node != our_node ] # allow for prefunded tokens if 'token_address' in token: token_address = token['token_address'] else: token_address = tools.create_token(registry_contract_address) transfers_with_amount = token['transfers_with_amount'] # FIXME: in order to do bidirectional channels, only one side # (i.e. only token['channels'][0]) should # open; others should join by calling # raiden.api.deposit, AFTER the channel came alive! 
# NOTE: leaving unidirectional for now because it most # probably will get to higher throughput log.warning('Waiting for all nodes to come online') api = RaidenAPI(app.raiden) for node in partner_nodes: api.start_health_check_for(node) while True: all_reachable = all( api.get_node_network_state(node) == NODE_NETWORK_REACHABLE for node in partner_nodes ) if all_reachable: break gevent.sleep(5) log.warning('All nodes are online') if our_node != nodes[-1]: our_index = nodes.index(our_node) peer = nodes[our_index + 1] tools.token_network_register(app.raiden.default_registry.address, token_address) amount = transfers_with_amount[nodes[-1]] while True: try: app.discovery.get(peer.decode('hex')) break except KeyError: log.warning('Error: peer {} not found in discovery'.format(peer)) time.sleep(random.randrange(30)) while True: try: log.warning('Opening channel with {} for {}'.format(peer, token_address)) api.channel_open(app.raiden.default_registry.address, token_address, peer) break except KeyError: log.warning('Error: could not open channel with {}'.format(peer)) time.sleep(random.randrange(30)) while True: try: log.warning('Funding channel with {} for {}'.format(peer, token_address)) api.channel_deposit( app.raiden.default_registry.address, token_address, peer, amount, ) break except Exception: log.warning('Error: could not deposit {} for {}'.format(amount, peer)) time.sleep(random.randrange(30)) if our_index == 0: last_node = nodes[-1] transfers_by_peer[last_node] = int(amount) if stage_prefix is not None: open('{}.stage1'.format(stage_prefix), 'a').close() log.warning('Done with initialization, waiting to continue...') event = gevent.event.Event() gevent.signal(signal.SIGUSR2, event.set) event.wait() transfer_results = {'total_time': 0, 'timestamps': []} def transfer(token_address, amount_per_transfer, total_transfers, peer, is_async): def transfer_(): log.warning('Making {} transfers to {}'.format(total_transfers, peer)) initial_time = time.time() times = [0] * total_transfers for index in range(total_transfers): RaidenAPI(app.raiden).transfer( app.raiden.default_registry.address, token_address.decode('hex'), amount_per_transfer, peer, ) times[index] = time.time() transfer_results['total_time'] = time.time() - initial_time transfer_results['timestamps'] = times log.warning('Making {} transfers took {}'.format( total_transfers, transfer_results['total_time'])) log.warning('Times: {}'.format(times)) if is_async: return gevent.spawn(transfer_) else: transfer_() # If sending to multiple targets, do it asynchronously, otherwise # keep it simple and just send to the single target on my thread. 
if len(transfers_by_peer) > 1: greenlets = [] for peer_, amount in transfers_by_peer.items(): greenlet = transfer(token_address, 1, amount, peer_, True) if greenlet is not None: greenlets.append(greenlet) gevent.joinall(greenlets) elif len(transfers_by_peer) == 1: for peer_, amount in transfers_by_peer.items(): transfer(token_address, 1, amount, peer_, False) log.warning('Waiting for termination') open('{}.stage2'.format(stage_prefix), 'a').close() log.warning('Waiting for transfers to finish, will write results...') event = gevent.event.Event() gevent.signal(signal.SIGUSR2, event.set) event.wait() open('{}.stage3'.format(stage_prefix), 'a').close() event = gevent.event.Event() gevent.signal(signal.SIGQUIT, event.set) gevent.signal(signal.SIGTERM, event.set) gevent.signal(signal.SIGINT, event.set) event.wait() else: log.warning('No scenario file supplied, doing nothing!') open('{}.stage2'.format(stage_prefix), 'a').close() event = gevent.event.Event() gevent.signal(signal.SIGQUIT, event.set) gevent.signal(signal.SIGTERM, event.set) gevent.signal(signal.SIGINT, event.set) event.wait() app.stop()
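The scenario file consumed by the loop above is only read through a handful of keys ('tokens', 'channels', 'transfers_with_amount', and an optional 'token_address'). A hypothetical scenario expressed as the equivalent Python structure; the addresses, amounts, and exact address encoding are assumptions for illustration:

scenario = {
    'tokens': [
        {
            'token_address': 'aa' * 20,                   # optional: reuse a prefunded token
            'channels': ['11' * 20, '22' * 20],           # ordered list of participant node addresses
            'transfers_with_amount': {'22' * 20: 1000},   # amount to transfer to the last node
        },
    ],
}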
def test_channel_lifecycle(raiden_network, token_addresses, deposit, transport_protocol): node1, node2 = raiden_network token_address = token_addresses[0] token_network_identifier = views.get_token_network_identifier_by_token_address( views.state_from_app(node1), node1.raiden.default_registry.address, token_address, ) api1 = RaidenAPI(node1.raiden) api2 = RaidenAPI(node2.raiden) registry_address = node1.raiden.default_registry.address # nodes don't have a channel, so they are not healthchecking assert api1.get_node_network_state(api2.address) == NODE_NETWORK_UNKNOWN assert api2.get_node_network_state(api1.address) == NODE_NETWORK_UNKNOWN assert not api1.get_channel_list(registry_address, token_address, api2.address) # Make sure invalid arguments to get_channel_list are caught with pytest.raises(UnknownTokenAddress): api1.get_channel_list( registry_address=registry_address, token_address=None, partner_address=api2.address, ) # open is a synchronous api api1.channel_open(node1.raiden.default_registry.address, token_address, api2.address) channels = api1.get_channel_list(registry_address, token_address, api2.address) assert len(channels) == 1 channel12 = get_channelstate(node1, node2, token_network_identifier) assert channel.get_status(channel12) == CHANNEL_STATE_OPENED event_list1 = api1.get_blockchain_events_channel( token_address, channel12.partner_state.address, ) assert any( ( event['event'] == ChannelEvent.OPENED and is_same_address( event['args']['participant1'], to_normalized_address(api1.address), ) and is_same_address( event['args']['participant2'], to_normalized_address(api2.address), ) ) for event in event_list1 ) token_events = api1.get_blockchain_events_token_network( token_address, ) assert token_events[0]['event'] == ChannelEvent.OPENED registry_address = api1.raiden.default_registry.address # Load the new state with the deposit api1.set_total_channel_deposit( registry_address, token_address, api2.address, deposit, ) # let's make sure it's idempotent. 
Same deposit should raise deposit mismatch limit with pytest.raises(DepositMismatch): api1.set_total_channel_deposit( registry_address, token_address, api2.address, deposit, ) channel12 = get_channelstate(node1, node2, token_network_identifier) assert channel.get_status(channel12) == CHANNEL_STATE_OPENED assert channel.get_balance(channel12.our_state, channel12.partner_state) == deposit assert channel12.our_state.contract_balance == deposit assert api1.get_channel_list(registry_address, token_address, api2.address) == [channel12] # there is a channel open, they must be healthchecking each other assert api1.get_node_network_state(api2.address) == NODE_NETWORK_REACHABLE assert api2.get_node_network_state(api1.address) == NODE_NETWORK_REACHABLE event_list2 = api1.get_blockchain_events_channel( token_address, channel12.partner_state.address, ) assert any( ( event['event'] == ChannelEvent.DEPOSIT and is_same_address( event['args']['participant'], to_normalized_address(api1.address), ) and event['args']['total_deposit'] == deposit ) for event in event_list2 ) api1.channel_close(registry_address, token_address, api2.address) # Load the new state with the channel closed channel12 = get_channelstate(node1, node2, token_network_identifier) event_list3 = api1.get_blockchain_events_channel( token_address, channel12.partner_state.address, ) assert len(event_list3) > len(event_list2) assert any( ( event['event'] == ChannelEvent.CLOSED and is_same_address( event['args']['closing_participant'], to_normalized_address(api1.address), ) ) for event in event_list3 ) assert channel.get_status(channel12) == CHANNEL_STATE_CLOSED settlement_block = ( channel12.close_transaction.finished_block_number + channel12.settle_timeout + 10 # arbitrary number of additional blocks, used to wait for the settle() call ) wait_until_block(node1.raiden.chain, settlement_block + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS) state_changes = node1.raiden.wal.storage.get_statechanges_by_identifier( from_identifier=0, to_identifier='latest', ) assert must_contain_entry(state_changes, ContractReceiveChannelSettled, { 'token_network_identifier': token_network_identifier, 'channel_identifier': channel12.identifier, })
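The idempotency assertion above relies on set_total_channel_deposit taking a monotonically increasing total rather than a delta. A stand-in illustration of that rule; the exception class here is a local placeholder, not Raiden's:

class DepositMismatch(Exception):
    """Stand-in for the Raiden exception of the same name."""

def check_new_total_deposit(current_total, new_total):
    # A repeated (or smaller) total is rejected instead of being applied twice.
    if new_total <= current_total:
        raise DepositMismatch('total_deposit must be strictly larger than the current total')
    return new_total - current_total   # the additional amount actually deposited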
def _run_smoketest(): print_step('Starting Raiden') # invoke the raiden app app_ = ctx.invoke(app, **args) raiden_api = RaidenAPI(app_.raiden) rest_api = RestAPI(raiden_api) api_server = APIServer(rest_api) (api_host, api_port) = split_endpoint(args['api_address']) api_server.start(api_host, api_port) raiden_api.channel_open( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], to_canonical_address(token.contract.address), to_canonical_address(TEST_PARTNER_ADDRESS), None, None, ) raiden_api.set_total_channel_deposit( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], to_canonical_address(token.contract.address), to_canonical_address(TEST_PARTNER_ADDRESS), TEST_DEPOSIT_AMOUNT, ) smoketest_config['contracts']['registry_address'] = to_checksum_address( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], ) smoketest_config['contracts']['secret_registry_address'] = to_checksum_address( contract_addresses[CONTRACT_SECRET_REGISTRY], ) smoketest_config['contracts']['discovery_address'] = to_checksum_address( contract_addresses[CONTRACT_ENDPOINT_REGISTRY], ) smoketest_config['contracts']['token_address'] = to_checksum_address( token.contract.address, ) success = False try: print_step('Running smoketest') error = run_smoketests(app_.raiden, smoketest_config, debug=debug) if error is not None: append_report('Smoketest assertion error', error) else: success = True finally: app_.stop() ethereum.send_signal(2) err, out = ethereum.communicate() append_report('Ethereum init stdout', ethereum_config['init_log_out'].decode('utf-8')) append_report('Ethereum init stderr', ethereum_config['init_log_err'].decode('utf-8')) append_report('Ethereum stdout', out) append_report('Ethereum stderr', err) append_report('Smoketest configuration', json.dumps(smoketest_config)) if success: print_step(f'Smoketest successful, report was written to {report_file}') else: print_step(f'Smoketest had errors, report was written to {report_file}', error=True) return success
class ConsoleTools: def __init__(self, raiden_service, discovery, settle_timeout, reveal_timeout): self._chain = raiden_service.chain self._raiden = raiden_service self._api = RaidenAPI(raiden_service) self._discovery = discovery self.settle_timeout = settle_timeout self.reveal_timeout = reveal_timeout def create_token( self, registry_address, initial_alloc=10**6, name='raidentester', symbol='RDT', decimals=2, timeout=60, auto_register=True, ): """ Create a proxy for a new HumanStandardToken (ERC20), that is initialized with Args(below). Per default it will be registered with 'raiden'. Args: initial_alloc (int): amount of initial tokens. name (str): human readable token name. symbol (str): token shorthand symbol. decimals (int): decimal places. timeout (int): timeout in seconds for creation. auto_register (boolean): if True(default), automatically register the token with raiden. Returns: token_address_hex: the hex encoded address of the new token/token. """ contract_path = get_contract_path('HumanStandardToken.sol') # Deploy a new ERC20 token with gevent.Timeout(timeout): token_proxy = self._chain.client.deploy_solidity_contract( 'HumanStandardToken', compile_files_cwd([contract_path]), dict(), (initial_alloc, name, decimals, symbol), contract_path=contract_path, ) token_address_hex = hexlify(token_proxy.contract_address) if auto_register: self.register_token(registry_address, token_address_hex) print("Successfully created {}the token '{}'.".format( 'and registered ' if auto_register else ' ', name, )) return token_address_hex def register_token( self, registry_address_hex: typing.AddressHex, token_address_hex: typing.AddressHex, retry_timeout: typing.NetworkTimeout = DEFAULT_RETRY_TIMEOUT, ) -> TokenNetwork: """ Register a token with the raiden token manager. Args: registry_address: registry address token_address_hex (string): a hex encoded token address. Returns: The token network proxy. """ registry_address = safe_address_decode(registry_address_hex) token_address = safe_address_decode(token_address_hex) registry = self._raiden.chain.token_network_registry(registry_address) token_network_address = registry.add_token(token_address) # Register the channel manager with the raiden registry waiting.wait_for_payment_network( self._raiden, registry.address, token_address, retry_timeout, ) return self._raiden.chain.token_network(token_network_address) def open_channel_with_funding( self, registry_address_hex, token_address_hex, peer_address_hex, total_deposit, settle_timeout=None, reveal_timeout=None, ): """ Convenience method to open a channel. Args: registry_address_hex (str): hex encoded address of the registry for the channel. token_address_hex (str): hex encoded address of the token for the channel. peer_address_hex (str): hex encoded address of the channel peer. total_deposit (int): amount of total funding for the channel. settle_timeout (int): amount of blocks for the settle time (if None use app defaults). reveal_timeout (int): amount of blocks for the reveal time (if None use app defaults). Return: netting_channel: the (newly opened) netting channel object. 
""" # Check, if peer is discoverable registry_address = safe_address_decode(registry_address_hex) peer_address = safe_address_decode(peer_address_hex) token_address = safe_address_decode(token_address_hex) try: self._discovery.get(peer_address) except KeyError: print('Error: peer {} not found in discovery'.format( peer_address_hex)) return None self._api.channel_open( registry_address, token_address, peer_address, settle_timeout=settle_timeout, reveal_timeout=reveal_timeout, ) return self._api.set_total_channel_deposit( registry_address, token_address, peer_address, total_deposit, ) def wait_for_contract(self, contract_address_hex, timeout=None): """ Wait until a contract is mined Args: contract_address_hex (string): hex encoded address of the contract timeout (int): time to wait for the contract to get mined Returns: True if the contract got mined, false otherwise """ contract_address = safe_address_decode(contract_address_hex) start_time = time.time() result = self._raiden.chain.client.web3.eth.getCode( to_checksum_address(contract_address), ) current_time = time.time() while not result: if timeout and start_time + timeout > current_time: return False result = self._raiden.chain.client.web3.eth.getCode( to_checksum_address(contract_address), ) gevent.sleep(0.5) current_time = time.time() return len(result) > 0
def test_channel_lifecycle(raiden_network, token_addresses, deposit, transport_config): node1, node2 = raiden_network token_address = token_addresses[0] token_network_identifier = views.get_token_network_identifier_by_token_address( views.state_from_app(node1), node1.raiden.default_registry.address, token_address, ) api1 = RaidenAPI(node1.raiden) api2 = RaidenAPI(node2.raiden) registry_address = node1.raiden.default_registry.address # nodes don't have a channel, so they are not healthchecking assert api1.get_node_network_state(api2.address) == NODE_NETWORK_UNKNOWN assert api2.get_node_network_state(api1.address) == NODE_NETWORK_UNKNOWN assert not api1.get_channel_list(registry_address, token_address, api2.address) # open is a synchronous api api1.channel_open(node1.raiden.default_registry.address, token_address, api2.address) channels = api1.get_channel_list(registry_address, token_address, api2.address) assert len(channels) == 1 channel12 = get_channelstate(node1, node2, token_network_identifier) assert channel.get_status(channel12) == CHANNEL_STATE_OPENED event_list1 = api1.get_blockchain_events_channel( token_address, channel12.partner_state.address, ) assert any((event['event'] == ChannelEvent.OPENED and is_same_address( event['args']['participant1'], to_normalized_address(api1.address), ) and is_same_address( event['args']['participant2'], to_normalized_address(api2.address), )) for event in event_list1) token_events = api1.get_blockchain_events_token_network(token_address, ) assert token_events[0]['event'] == ChannelEvent.OPENED registry_address = api1.raiden.default_registry.address # Load the new state with the deposit api1.set_total_channel_deposit( registry_address, token_address, api2.address, deposit, ) # let's make sure it's idempotent. 
Same deposit should raise deposit mismatch limit with pytest.raises(DepositMismatch): api1.set_total_channel_deposit( registry_address, token_address, api2.address, deposit, ) channel12 = get_channelstate(node1, node2, token_network_identifier) assert channel.get_status(channel12) == CHANNEL_STATE_OPENED assert channel.get_balance(channel12.our_state, channel12.partner_state) == deposit assert channel12.our_state.contract_balance == deposit assert api1.get_channel_list(registry_address, token_address, api2.address) == [channel12] # there is a channel open, they must be healthchecking each other assert api1.get_node_network_state(api2.address) == NODE_NETWORK_REACHABLE assert api2.get_node_network_state(api1.address) == NODE_NETWORK_REACHABLE event_list2 = api1.get_blockchain_events_channel( token_address, channel12.partner_state.address, ) assert any((event['event'] == ChannelEvent.DEPOSIT and is_same_address( event['args']['participant'], to_normalized_address(api1.address), ) and event['args']['total_deposit'] == deposit) for event in event_list2) api1.channel_close(registry_address, token_address, api2.address) # Load the new state with the channel closed channel12 = get_channelstate(node1, node2, token_network_identifier) event_list3 = api1.get_blockchain_events_channel( token_address, channel12.partner_state.address, ) assert len(event_list3) > len(event_list2) assert any((event['event'] == ChannelEvent.CLOSED and is_same_address( event['args']['closing_participant'], to_normalized_address(api1.address), )) for event in event_list3) assert channel.get_status(channel12) == CHANNEL_STATE_CLOSED settlement_block = ( channel12.close_transaction.finished_block_number + channel12.settle_timeout + 10 # arbitrary number of additional blocks, used to wait for the settle() call ) wait_until_block(node1.raiden.chain, settlement_block) state_changes = node1.raiden.wal.storage.get_statechanges_by_identifier( from_identifier=0, to_identifier='latest', ) assert must_contain_entry( state_changes, ContractReceiveChannelSettled, { 'token_network_identifier': token_network_identifier, 'channel_identifier': channel12.identifier, })
def _run_smoketest(): print_step('Starting Raiden') config = deepcopy(App.DEFAULT_CONFIG) if args.get('extra_config', dict()): merge_dict(config, args['extra_config']) del args['extra_config'] args['config'] = config raiden_stdout = StringIO() with contextlib.redirect_stdout(raiden_stdout): try: # invoke the raiden app app = run_app(**args) raiden_api = RaidenAPI(app.raiden) rest_api = RestAPI(raiden_api) api_server = APIServer(rest_api) (api_host, api_port) = split_endpoint(args['api_address']) api_server.start(api_host, api_port) raiden_api.channel_open( registry_address=contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], token_address=to_canonical_address(token.contract.address), partner_address=to_canonical_address(TEST_PARTNER_ADDRESS), ) raiden_api.set_total_channel_deposit( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], to_canonical_address(token.contract.address), to_canonical_address(TEST_PARTNER_ADDRESS), TEST_DEPOSIT_AMOUNT, ) token_addresses = [to_checksum_address(token.contract.address)] success = False print_step('Running smoketest') error = run_smoketests( app.raiden, args['transport'], token_addresses, contract_addresses[CONTRACT_ENDPOINT_REGISTRY], debug=debug, ) if error is not None: append_report('Smoketest assertion error', error) else: success = True finally: app.stop() app.raiden.get() node = ethereum[0] node.send_signal(2) err, out = node.communicate() append_report('Ethereum stdout', out) append_report('Ethereum stderr', err) append_report('Raiden Node stdout', raiden_stdout.getvalue()) if success: print_step(f'Smoketest successful') else: print_step(f'Smoketest had errors', error=True) return success
def run( privatekey, registry_contract_address, secret_registry_contract_address, discovery_contract_address, listen_address, structlog, logfile, scenario, stage_prefix, ): # pylint: disable=unused-argument # TODO: only enabled structlog on "initiators" structlog.configure(structlog, log_file=logfile) (listen_host, listen_port) = split_endpoint(listen_address) config = App.DEFAULT_CONFIG.copy() config['host'] = listen_host config['port'] = listen_port config['privatekey_hex'] = privatekey privatekey_bin = decode_hex(privatekey) rpc_client = JSONRPCClient( '127.0.0.1', 8545, privatekey_bin, ) blockchain_service = BlockChainService( privatekey_bin, rpc_client, GAS_PRICE, ) discovery = ContractDiscovery( blockchain_service, decode_hex(discovery_contract_address), ) registry = blockchain_service.registry( registry_contract_address, ) secret_registry = blockchain_service.secret_registry( secret_registry_contract_address, ) throttle_policy = TokenBucket( config['protocol']['throttle_capacity'], config['protocol']['throttle_fill_rate'], ) transport = UDPTransport( discovery, server._udp_socket((listen_host, listen_port)), throttle_policy, config['protocol'], dict(), ) app = App( config, blockchain_service, registry, secret_registry, transport, discovery, ) app.discovery.register( app.raiden.address, listen_host, listen_port, ) app.raiden.install_payment_network_filters(app.raiden.default_registry.address) if scenario: script = json.load(scenario) tools = ConsoleTools( app.raiden, app.discovery, app.config['settle_timeout'], app.config['reveal_timeout'], ) transfers_by_peer = {} tokens = script['tokens'] token_address = None peer = None our_node = hexlify(app.raiden.address) log.warning('our address is {}'.format(our_node)) for token in tokens: # skip tokens that we're not part of nodes = token['channels'] if our_node not in nodes: continue partner_nodes = [ node for node in nodes if node != our_node ] # allow for prefunded tokens if 'token_address' in token: token_address = token['token_address'] else: token_address = tools.create_token(registry_contract_address) transfers_with_amount = token['transfers_with_amount'] # FIXME: in order to do bidirectional channels, only one side # (i.e. only token['channels'][0]) should # open; others should join by calling # raiden.api.deposit, AFTER the channel came alive! 
# NOTE: leaving unidirectional for now because it most # probably will get to higher throughput log.warning('Waiting for all nodes to come online') api = RaidenAPI(app.raiden) for node in partner_nodes: api.start_health_check_for(node) while True: all_reachable = all( api.get_node_network_state(node) == NODE_NETWORK_REACHABLE for node in partner_nodes ) if all_reachable: break gevent.sleep(5) log.warning('All nodes are online') if our_node != nodes[-1]: our_index = nodes.index(our_node) peer = nodes[our_index + 1] tools.token_network_register(app.raiden.default_registry.address, token_address) amount = transfers_with_amount[nodes[-1]] while True: try: app.discovery.get(peer.decode('hex')) break except KeyError: log.warning('Error: peer {} not found in discovery'.format(peer)) time.sleep(random.randrange(30)) while True: try: log.warning('Opening channel with {} for {}'.format(peer, token_address)) api.channel_open(app.raiden.default_registry.address, token_address, peer) break except KeyError: log.warning('Error: could not open channel with {}'.format(peer)) time.sleep(random.randrange(30)) while True: try: log.warning('Funding channel with {} for {}'.format(peer, token_address)) api.channel_deposit( app.raiden.default_registry.address, token_address, peer, amount, ) break except Exception: log.warning('Error: could not deposit {} for {}'.format(amount, peer)) time.sleep(random.randrange(30)) if our_index == 0: last_node = nodes[-1] transfers_by_peer[last_node] = int(amount) else: peer = nodes[-2] if stage_prefix is not None: open('{}.stage1'.format(stage_prefix), 'a').close() log.warning('Done with initialization, waiting to continue...') event = gevent.event.Event() gevent.signal(signal.SIGUSR2, event.set) event.wait() transfer_results = {'total_time': 0, 'timestamps': []} def transfer(token_address, amount_per_transfer, total_transfers, peer, is_async): def transfer_(): log.warning('Making {} transfers to {}'.format(total_transfers, peer)) initial_time = time.time() times = [0] * total_transfers for index in range(total_transfers): RaidenAPI(app.raiden).transfer( app.raiden.default_registry.address, token_address.decode('hex'), amount_per_transfer, peer, ) times[index] = time.time() transfer_results['total_time'] = time.time() - initial_time transfer_results['timestamps'] = times log.warning('Making {} transfers took {}'.format( total_transfers, transfer_results['total_time'])) log.warning('Times: {}'.format(times)) if is_async: return gevent.spawn(transfer_) else: transfer_() # If sending to multiple targets, do it asynchronously, otherwise # keep it simple and just send to the single target on my thread. 
if len(transfers_by_peer) > 1: greenlets = [] for peer_, amount in transfers_by_peer.items(): greenlet = transfer(token_address, 1, amount, peer_, True) if greenlet is not None: greenlets.append(greenlet) gevent.joinall(greenlets) elif len(transfers_by_peer) == 1: for peer_, amount in transfers_by_peer.items(): transfer(token_address, 1, amount, peer_, False) log.warning('Waiting for termination') open('{}.stage2'.format(stage_prefix), 'a').close() log.warning('Waiting for transfers to finish, will write results...') event = gevent.event.Event() gevent.signal(signal.SIGUSR2, event.set) event.wait() open('{}.stage3'.format(stage_prefix), 'a').close() event = gevent.event.Event() gevent.signal(signal.SIGQUIT, event.set) gevent.signal(signal.SIGTERM, event.set) gevent.signal(signal.SIGINT, event.set) event.wait() else: log.warning('No scenario file supplied, doing nothing!') open('{}.stage2'.format(stage_prefix), 'a').close() event = gevent.event.Event() gevent.signal(signal.SIGQUIT, event.set) gevent.signal(signal.SIGTERM, event.set) gevent.signal(signal.SIGINT, event.set) event.wait() app.stop()
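The staging logic above parks the process on a gevent Event until an operator sends SIGUSR2. The same pattern written with the standard library only, for illustration; the handler and event names are made up and the snippet assumes a POSIX system:

import signal
import threading

resume = threading.Event()
signal.signal(signal.SIGUSR2, lambda signum, frame: resume.set())  # POSIX-only signal
# resume.wait()  # would block here until `kill -USR2 <pid>` is received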
def test_channel_lifecycle(raiden_network, token_addresses, deposit): node1, node2 = raiden_network token_address = token_addresses[0] api1 = RaidenAPI(node1.raiden) api2 = RaidenAPI(node2.raiden) # nodes don't have a channel, so they are not healthchecking assert api1.get_node_network_state(api2.address) == NODE_NETWORK_UNKNOWN assert api2.get_node_network_state(api1.address) == NODE_NETWORK_UNKNOWN assert not api1.get_channel_list(token_address, api2.address) # open is a synchronous api api1.channel_open(token_address, api2.address) channels = api1.get_channel_list(token_address, api2.address) assert len(channels) == 1 channel12 = get_channelstate(node1, node2, token_address) assert channel.get_status(channel12) == CHANNEL_STATE_OPENED event_list1 = api1.get_channel_events( channel12.identifier, channel12.open_transaction.finished_block_number, ) assert event_list1 == [] # Load the new state with the deposit api1.channel_deposit(token_address, api2.address, deposit) channel12 = get_channelstate(node1, node2, token_address) assert channel.get_status(channel12) == CHANNEL_STATE_OPENED assert channel.get_balance(channel12.our_state, channel12.partner_state) == deposit assert channel12.our_state.contract_balance == deposit assert api1.get_channel_list(token_address, api2.address) == [channel12] # there is a channel open, they must be healthchecking each other assert api1.get_node_network_state(api2.address) == NODE_NETWORK_REACHABLE assert api2.get_node_network_state(api1.address) == NODE_NETWORK_REACHABLE event_list2 = api1.get_channel_events( channel12.identifier, channel12.open_transaction.finished_block_number, ) assert any((event['_event_type'] == b'ChannelNewBalance' and event['participant'] == address_encoder(api1.address)) for event in event_list2) api1.channel_close(token_address, api2.address) node1.raiden.poll_blockchain_events() # Load the new state with the channel closed channel12 = get_channelstate(node1, node2, token_address) event_list3 = api1.get_channel_events( channel12.identifier, channel12.open_transaction.finished_block_number, ) assert len(event_list3) > len(event_list2) assert any((event['_event_type'] == b'ChannelClosed' and event['closing_address'] == address_encoder(api1.address)) for event in event_list3) assert channel.get_status(channel12) == CHANNEL_STATE_CLOSED settlement_block = ( channel12.close_transaction.finished_block_number + channel12.settle_timeout + 10 # arbitrary number of additional blocks, used to wait for the settle() call ) wait_until_block(node1.raiden.chain, settlement_block) # Load the new state with the channel settled channel12 = get_channelstate(node1, node2, token_address) node1.raiden.poll_blockchain_events() assert channel.get_status(channel12) == CHANNEL_STATE_SETTLED
class ConnectionManager: """The ConnectionManager provides a high level abstraction for connecting to a Token network. Note: It is initialized with 0 funds; a connection to the token network will be only established _after_ calling `connect(funds)` """ # XXX Hack: for bootstrapping, the first node on a network opens a channel # with this address to become visible. BOOTSTRAP_ADDR_HEX = b'2' * 40 BOOTSTRAP_ADDR = unhexlify(BOOTSTRAP_ADDR_HEX) def __init__(self, raiden, token_network_identifier): chain_state = views.state_from_raiden(raiden) token_network_state = views.get_token_network_by_identifier( chain_state, token_network_identifier, ) token_network_registry = views.get_token_network_registry_by_token_network_identifier( chain_state, token_network_identifier, ) # TODO: # - Add timeout for transaction polling, used to overwrite the RaidenAPI # defaults # - Add a proper selection strategy (#576) self.funds = 0 self.initial_channel_target = 0 self.joinable_funds_target = 0 self.raiden = raiden self.registry_address = token_network_registry.address self.token_network_identifier = token_network_identifier self.token_address = token_network_state.token_address self.lock = Semaphore() #: protects self.funds and self.initial_channel_target self.api = RaidenAPI(raiden) def connect( self, funds: typing.TokenAmount, initial_channel_target: int = 3, joinable_funds_target: float = 0.4, ): """Connect to the network. Subsequent calls to `connect` are allowed, but will only affect the spendable funds and the connection strategy parameters for the future. `connect` will not close any channels. Note: the ConnectionManager does not discriminate manually opened channels from automatically opened ones. If the user manually opened channels, those deposit amounts will affect the funding per channel and the number of new channels opened. Args: funds: Target amount of tokens spendable to join the network. initial_channel_target: Target number of channels to open. joinable_funds_target: Amount of funds not initially assigned. """ token = self.raiden.chain.token(self.token_address) token_balance = token.balance_of(self.raiden.address) if token_balance < funds: raise InvalidAmount( f'Insufficient balance for token {pex(self.token_address)}', ) if funds <= 0: raise InvalidAmount( 'The funds to use in the connection need to be a positive integer', ) with self.lock: self.funds = funds self.initial_channel_target = initial_channel_target self.joinable_funds_target = joinable_funds_target log_open_channels(self.raiden, self.registry_address, self.token_address, funds) qty_network_channels = views.count_token_network_channels( views.state_from_raiden(self.raiden), self.registry_address, self.token_address, ) if not qty_network_channels: log.debug('bootstrapping token network.') # make ourselves visible self.api.channel_open( self.registry_address, self.token_address, self.BOOTSTRAP_ADDR, ) else: self._open_channels() def leave_async(self): """ Async version of `leave()` """ leave_result = AsyncResult() gevent.spawn(self.leave).link(leave_result) return leave_result def leave(self, registry_address): """ Leave the token network. This implies closing all channels and waiting for all channels to be settled. 
""" with self.lock: self.initial_channel_target = 0 channels_to_close = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), payment_network_id=registry_address, token_address=self.token_address, ) partner_addresses = [ channel_state.partner_state.address for channel_state in channels_to_close ] self.api.channel_batch_close( registry_address, self.token_address, partner_addresses, ) channel_ids = [ channel_state.identifier for channel_state in channels_to_close ] waiting.wait_for_settle( self.raiden, registry_address, self.token_address, channel_ids, self.raiden.alarm.sleep_time, ) return channels_to_close def join_channel(self, partner_address, partner_deposit): """Will be called, when we were selected as channel partner by another node. It will fund the channel with up to the partners deposit, but not more than remaining funds or the initial funding per channel. If the connection manager has no funds, this is a noop. """ with self.lock: joining_funds = min( partner_deposit, self._funds_remaining, self._initial_funding_per_partner, ) if joining_funds <= 0 or self._leaving_state: return try: self.api.set_total_channel_deposit( self.registry_address, self.token_address, partner_address, joining_funds, ) except RaidenRecoverableError: log.exception('connection manager join: channel not in opened state') else: log.debug( 'joined a channel!', funds=joining_funds, me=pex(self.raiden.address), partner=pex(partner_address), ) def retry_connect(self): """Will be called when new channels in the token network are detected. If the minimum number of channels was not yet established, it will try to open new channels. If the connection manager has no funds, this is a noop. """ with self.lock: if self._funds_remaining > 0 and not self._leaving_state: self._open_channels() def _find_new_partners(self): """ Search the token network for potential channel partners. """ open_channels = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), payment_network_id=self.registry_address, token_address=self.token_address, ) known = set(channel_state.partner_state.address for channel_state in open_channels) known.add(self.BOOTSTRAP_ADDR) known.add(self.raiden.address) participants_addresses = views.get_participants_addresses( views.state_from_raiden(self.raiden), self.registry_address, self.token_address, ) available = participants_addresses - known available = list(available) shuffle(available) new_partners = available log.debug('found {} partners'.format(len(available))) return new_partners def _join_partner(self, partner: Address): """ Ensure a channel exists with partner and is funded in our side """ try: self.api.channel_open( self.registry_address, self.token_address, partner, ) except DuplicatedChannelError: # If channel already exists (either because partner created it, # or it's nonfunded channel), continue to ensure it's funded pass try: self.api.set_total_channel_deposit( self.registry_address, self.token_address, partner, self._initial_funding_per_partner, ) except TransactionThrew: log.exception('connection manager: deposit failed') except RaidenRecoverableError: log.exception('connection manager: channel not in opened state') except InsufficientFunds as e: log.error(f'connection manager: {str(e)}') def _open_channels(self) -> bool: """ Open channels until there are `self.initial_channel_target` channels open. Do nothing if there are enough channels open already. Note: - This method must be called with the lock held. 
Return: - False if no channels could be opened """ open_channels = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), payment_network_id=self.registry_address, token_address=self.token_address, ) # don't consider the bootstrap channel open_channels = [ channel_state for channel_state in open_channels if channel_state.partner_state.address != self.BOOTSTRAP_ADDR ] funded_channels = [ channel_state for channel_state in open_channels if channel_state.our_state.contract_balance >= self._initial_funding_per_partner ] nonfunded_channels = [ channel_state for channel_state in open_channels if channel_state not in funded_channels ] possible_new_partners = self._find_new_partners() if possible_new_partners == 0: return False # if we already met our target, break if len(funded_channels) >= self.initial_channel_target: return False # if we didn't, but there's no nonfunded channels and no available partners # it means the network is smaller than our target, so we should also break if not nonfunded_channels and possible_new_partners == 0: return False n_to_join = self.initial_channel_target - len(funded_channels) nonfunded_partners = [ channel_state.partner_state.address for channel_state in nonfunded_channels ] # first, fund nonfunded channels, then open and fund with possible_new_partners, # until initial_channel_target of funded channels is met join_partners = (nonfunded_partners + possible_new_partners)[:n_to_join] greenlets = [ gevent.spawn(self._join_partner, partner) for partner in join_partners ] gevent.joinall(greenlets, raise_error=True) return True @property def _initial_funding_per_partner(self) -> int: """The calculated funding per partner depending on configuration and overall funding of the ConnectionManager. Note: - This attribute must be accessed with the lock held. """ if self.initial_channel_target: return int( self.funds * (1 - self.joinable_funds_target) / self.initial_channel_target, ) return 0 @property def _funds_remaining(self) -> int: """The remaining funds after subtracting the already deposited amounts. Note: - This attribute must be accessed with the lock held. """ if self.funds > 0: token = self.raiden.chain.token(self.token_address) token_balance = token.balance_of(self.raiden.address) sum_deposits = views.get_our_capacity_for_token_network( views.state_from_raiden(self.raiden), self.registry_address, self.token_address, ) return min(self.funds - sum_deposits, token_balance) return 0 @property def _leaving_state(self) -> bool: """True if the node is leaving the token network. Note: - This attribute must be accessed with the lock held. """ return self.initial_channel_target < 1 def __repr__(self) -> str: open_channels = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), payment_network_id=self.registry_address, token_address=self.token_address, ) return f'{self.__class__.__name__}(target={self.initial_channel_target} ' +\ f'channels={len(open_channels)}:{open_channels!r})'
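The selection in _open_channels boils down to topping up under-funded channels first and only then opening channels with new partners, capped at the channel target. The same arithmetic spelled out with made-up data:

initial_channel_target = 3
funded_partners = ['partner_a']                      # channels already holding the target deposit
nonfunded_partners = ['partner_b']                   # open channels still below the target deposit
possible_new_partners = ['partner_c', 'partner_d']   # discovered in the token network

n_to_join = initial_channel_target - len(funded_partners)
join_partners = (nonfunded_partners + possible_new_partners)[:n_to_join]
assert join_partners == ['partner_b', 'partner_c']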
def _run_smoketest(): print_step('Starting Raiden') config = deepcopy(App.DEFAULT_CONFIG) if args.get('extra_config', dict()): merge_dict(config, args['extra_config']) del args['extra_config'] args['config'] = config raiden_stdout = StringIO() with contextlib.redirect_stdout(raiden_stdout): app = run_app(**args) try: raiden_api = RaidenAPI(app.raiden) rest_api = RestAPI(raiden_api) (api_host, api_port) = split_endpoint(args['api_address']) api_server = APIServer(rest_api, config={ 'host': api_host, 'port': api_port }) api_server.start() block = app.raiden.get_block_number( ) + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS # Proxies now use the confirmed block hash to query the chain for # prerequisite checks. Wait a bit here to make sure that the confirmed # block hash contains the deployed token network or else things break wait_for_block( raiden=app.raiden, block_number=block, retry_timeout=1.0, ) raiden_api.channel_open( registry_address=contract_addresses[ CONTRACT_TOKEN_NETWORK_REGISTRY], token_address=to_canonical_address(token.contract.address), partner_address=to_canonical_address(TEST_PARTNER_ADDRESS), ) raiden_api.set_total_channel_deposit( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], to_canonical_address(token.contract.address), to_canonical_address(TEST_PARTNER_ADDRESS), TEST_DEPOSIT_AMOUNT, ) token_addresses = [to_checksum_address(token.contract.address)] success = False print_step('Running smoketest') error = run_smoketests( app.raiden, args['transport'], token_addresses, contract_addresses[CONTRACT_ENDPOINT_REGISTRY], debug=debug, orig_stdout=stdout, ) if error is not None: append_report('Smoketest assertion error', error) else: success = True finally: app.stop() app.raiden.get() node = ethereum[0] node.send_signal(2) err, out = node.communicate() append_report('Ethereum stdout', out) append_report('Ethereum stderr', err) append_report('Raiden Node stdout', raiden_stdout.getvalue()) if success: print_step(f'Smoketest successful') else: print_step(f'Smoketest had errors', error=True) return success
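The stdout capture wrapping the whole run above is plain contextlib machinery. A minimal standalone example of the pattern:

import contextlib
from io import StringIO

buffer = StringIO()
with contextlib.redirect_stdout(buffer):
    print('captured')
assert buffer.getvalue() == 'captured\n'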
def test_channel_lifecycle(raiden_network, token_addresses, deposit, transport_config): node1, node2 = raiden_network token_address = token_addresses[0] token_network_identifier = views.get_token_network_identifier_by_token_address( views.state_from_app(node1), node1.raiden.default_registry.address, token_address, ) api1 = RaidenAPI(node1.raiden) api2 = RaidenAPI(node2.raiden) registry_address = node1.raiden.default_registry.address if transport_config.protocol == TransportProtocol.UDP: # nodes don't have a channel, so they are not healthchecking assert api1.get_node_network_state( api2.address) == NODE_NETWORK_UNKNOWN assert api2.get_node_network_state( api1.address) == NODE_NETWORK_UNKNOWN elif transport_config.protocol == TransportProtocol.MATRIX: # with Matrix nodes do not need a health check to know each others reachability assert api1.get_node_network_state( api2.address) == NODE_NETWORK_UNREACHABLE assert api2.get_node_network_state( api1.address) == NODE_NETWORK_UNREACHABLE assert not api1.get_channel_list(registry_address, token_address, api2.address) # open is a synchronous api api1.channel_open(node1.raiden.default_registry.address, token_address, api2.address) channels = api1.get_channel_list(registry_address, token_address, api2.address) assert len(channels) == 1 channel12 = get_channelstate(node1, node2, token_network_identifier) assert channel.get_status(channel12) == CHANNEL_STATE_OPENED event_list1 = api1.get_channel_events( channel12.identifier, channel12.open_transaction.finished_block_number, ) assert event_list1 == [] token_events = api1.get_token_network_events( token_address, channel12.open_transaction.finished_block_number, ) assert token_events[0]['event'] == EVENT_CHANNEL_NEW registry_address = api1.raiden.default_registry.address # Load the new state with the deposit api1.channel_deposit( registry_address, token_address, api2.address, deposit, ) channel12 = get_channelstate(node1, node2, token_network_identifier) assert channel.get_status(channel12) == CHANNEL_STATE_OPENED assert channel.get_balance(channel12.our_state, channel12.partner_state) == deposit assert channel12.our_state.contract_balance == deposit assert api1.get_channel_list(registry_address, token_address, api2.address) == [channel12] # there is a channel open, they must be healthchecking each other assert api1.get_node_network_state(api2.address) == NODE_NETWORK_REACHABLE assert api2.get_node_network_state(api1.address) == NODE_NETWORK_REACHABLE event_list2 = api1.get_channel_events( channel12.identifier, channel12.open_transaction.finished_block_number, ) assert any( (event['event'] == EVENT_CHANNEL_NEW_BALANCE and is_same_address( event['args']['registry_address'], to_normalized_address(registry_address), ) and is_same_address( event['args']['participant'], to_normalized_address(api1.address), )) for event in event_list2) api1.channel_close(registry_address, token_address, api2.address) # Load the new state with the channel closed channel12 = get_channelstate(node1, node2, token_network_identifier) event_list3 = api1.get_channel_events( channel12.identifier, channel12.open_transaction.finished_block_number, ) assert len(event_list3) > len(event_list2) assert any((event['event'] == EVENT_CHANNEL_CLOSED and is_same_address( event['args']['registry_address'], to_normalized_address(registry_address), ) and is_same_address( event['args']['closing_address'], to_normalized_address(api1.address), )) for event in event_list3) assert channel.get_status(channel12) == CHANNEL_STATE_CLOSED settlement_block = 
( channel12.close_transaction.finished_block_number + channel12.settle_timeout + 10 # arbitrary number of additional blocks, used to wait for the settle() call ) wait_until_block(node1.raiden.chain, settlement_block) # Load the new state with the channel settled channel12 = get_channelstate(node1, node2, token_network_identifier) assert channel.get_status(channel12) == CHANNEL_STATE_SETTLED
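The settlement wait used by these tests is just the closing block plus the channel's settle timeout plus a small safety margin. Spelled out with made-up numbers:

close_block = 120          # block in which the close transaction was mined
settle_timeout = 500       # channel settle timeout in blocks
safety_margin = 10         # extra blocks to give the settle() call time to land

settlement_block = close_block + settle_timeout + safety_margin
assert settlement_block == 630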
def run_test_raidenapi_channel_lifecycle(raiden_network, token_addresses, deposit, retry_timeout): node1, node2 = raiden_network token_address = token_addresses[0] token_network_identifier = views.get_token_network_identifier_by_token_address( views.state_from_app(node1), node1.raiden.default_registry.address, token_address) api1 = RaidenAPI(node1.raiden) api2 = RaidenAPI(node2.raiden) registry_address = node1.raiden.default_registry.address # nodes don't have a channel, so they are not healthchecking assert api1.get_node_network_state(api2.address) == NODE_NETWORK_UNKNOWN assert api2.get_node_network_state(api1.address) == NODE_NETWORK_UNKNOWN assert not api1.get_channel_list(registry_address, token_address, api2.address) # Make sure invalid arguments to get_channel_list are caught with pytest.raises(UnknownTokenAddress): api1.get_channel_list(registry_address=registry_address, token_address=None, partner_address=api2.address) # open is a synchronous api api1.channel_open(node1.raiden.default_registry.address, token_address, api2.address) channels = api1.get_channel_list(registry_address, token_address, api2.address) assert len(channels) == 1 channel12 = get_channelstate(node1, node2, token_network_identifier) assert channel.get_status(channel12) == CHANNEL_STATE_OPENED channel_event_list1 = api1.get_blockchain_events_channel( token_address, channel12.partner_state.address) assert must_have_event( channel_event_list1, { "event": ChannelEvent.OPENED, "args": { "participant1": to_checksum_address(api1.address), "participant2": to_checksum_address(api2.address), }, }, ) network_event_list1 = api1.get_blockchain_events_token_network( token_address) assert must_have_event(network_event_list1, {"event": ChannelEvent.OPENED}) registry_address = api1.raiden.default_registry.address # Check that giving a 0 total deposit is not accepted with pytest.raises(DepositMismatch): api1.set_total_channel_deposit( registry_address=registry_address, token_address=token_address, partner_address=api2.address, total_deposit=0, ) # Load the new state with the deposit api1.set_total_channel_deposit( registry_address=registry_address, token_address=token_address, partner_address=api2.address, total_deposit=deposit, ) # let's make sure it's idempotent. 
Same deposit should raise deposit mismatch limit with pytest.raises(DepositMismatch): api1.set_total_channel_deposit(registry_address, token_address, api2.address, deposit) channel12 = get_channelstate(node1, node2, token_network_identifier) assert channel.get_status(channel12) == CHANNEL_STATE_OPENED assert channel.get_balance(channel12.our_state, channel12.partner_state) == deposit assert channel12.our_state.contract_balance == deposit assert api1.get_channel_list(registry_address, token_address, api2.address) == [channel12] # there is a channel open, they must be healthchecking each other assert api1.get_node_network_state(api2.address) == NODE_NETWORK_REACHABLE assert api2.get_node_network_state(api1.address) == NODE_NETWORK_REACHABLE event_list2 = api1.get_blockchain_events_channel( token_address, channel12.partner_state.address) assert must_have_event( event_list2, { "event": ChannelEvent.DEPOSIT, "args": { "participant": to_checksum_address(api1.address), "total_deposit": deposit }, }, ) api1.channel_close(registry_address, token_address, api2.address) # Load the new state with the channel closed channel12 = get_channelstate(node1, node2, token_network_identifier) event_list3 = api1.get_blockchain_events_channel( token_address, channel12.partner_state.address) assert len(event_list3) > len(event_list2) assert must_have_event( event_list3, { "event": ChannelEvent.CLOSED, "args": { "closing_participant": to_checksum_address(api1.address) }, }, ) assert channel.get_status(channel12) == CHANNEL_STATE_CLOSED assert wait_for_state_change( node1.raiden, ContractReceiveChannelSettled, { "token_network_identifier": token_network_identifier, "channel_identifier": channel12.identifier, }, retry_timeout, )
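must_have_event above asserts that at least one event carries the given nested key/value subset. A rough stand-in for that matcher; the real helper lives in Raiden's test utilities and may differ:

def has_event(events, expected):
    def matches(candidate, spec):
        # candidate must be a dict containing every key in spec, recursing into nested dicts.
        if not isinstance(candidate, dict):
            return False
        for key, value in spec.items():
            if isinstance(value, dict):
                if not matches(candidate.get(key), value):
                    return False
            elif candidate.get(key) != value:
                return False
        return True
    return any(matches(event, expected) for event in events)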
def test_channel_lifecycle(raiden_network, token_addresses, deposit, transport_config): node1, node2 = raiden_network token_address = token_addresses[0] token_network_identifier = views.get_token_network_identifier_by_token_address( views.state_from_app(node1), node1.raiden.default_registry.address, token_address, ) api1 = RaidenAPI(node1.raiden) api2 = RaidenAPI(node2.raiden) registry_address = node1.raiden.default_registry.address # nodes don't have a channel, so they are not healthchecking assert api1.get_node_network_state(api2.address) == NODE_NETWORK_UNKNOWN assert api2.get_node_network_state(api1.address) == NODE_NETWORK_UNKNOWN assert not api1.get_channel_list(registry_address, token_address, api2.address) # open is a synchronous api api1.channel_open(node1.raiden.default_registry.address, token_address, api2.address) channels = api1.get_channel_list(registry_address, token_address, api2.address) assert len(channels) == 1 channel12 = get_channelstate(node1, node2, token_network_identifier) assert channel.get_status(channel12) == CHANNEL_STATE_OPENED event_list1 = api1.get_channel_events( token_address, channel12.partner_state.address, channel12.open_transaction.finished_block_number, ) assert any( ( event['event'] == EVENT_CHANNEL_OPENED and is_same_address( event['args']['participant1'], to_normalized_address(api1.address), ) and is_same_address( event['args']['participant2'], to_normalized_address(api2.address), ) ) for event in event_list1 ) token_events = api1.get_token_network_events( token_address, channel12.open_transaction.finished_block_number, ) assert token_events[0]['event'] == EVENT_CHANNEL_OPENED registry_address = api1.raiden.default_registry.address # Load the new state with the deposit api1.set_total_channel_deposit( registry_address, token_address, api2.address, deposit, ) # let's make sure it's idempotent api1.set_total_channel_deposit( registry_address, token_address, api2.address, deposit, ) channel12 = get_channelstate(node1, node2, token_network_identifier) assert channel.get_status(channel12) == CHANNEL_STATE_OPENED assert channel.get_balance(channel12.our_state, channel12.partner_state) == deposit assert channel12.our_state.contract_balance == deposit assert api1.get_channel_list(registry_address, token_address, api2.address) == [channel12] # there is a channel open, they must be healthchecking each other assert api1.get_node_network_state(api2.address) == NODE_NETWORK_REACHABLE assert api2.get_node_network_state(api1.address) == NODE_NETWORK_REACHABLE event_list2 = api1.get_channel_events( token_address, channel12.partner_state.address, channel12.open_transaction.finished_block_number, ) assert any( ( event['event'] == EVENT_CHANNEL_DEPOSIT and is_same_address( event['args']['participant'], to_normalized_address(api1.address), ) and event['args']['total_deposit'] == deposit ) for event in event_list2 ) api1.channel_close(registry_address, token_address, api2.address) # Load the new state with the channel closed channel12 = get_channelstate(node1, node2, token_network_identifier) event_list3 = api1.get_channel_events( token_address, channel12.partner_state.address, channel12.open_transaction.finished_block_number, ) assert len(event_list3) > len(event_list2) assert any( ( event['event'] == EVENT_CHANNEL_CLOSED and is_same_address( event['args']['closing_participant'], to_normalized_address(api1.address), ) ) for event in event_list3 ) assert channel.get_status(channel12) == CHANNEL_STATE_CLOSED settlement_block = ( 
channel12.close_transaction.finished_block_number + channel12.settle_timeout + 10 # arbitrary number of additional blocks, used to wait for the settle() call ) wait_until_block(node1.raiden.chain, settlement_block) # Load the new state with the channel settled channel12 = get_channelstate(node1, node2, token_network_identifier) assert channel.get_status(channel12) == CHANNEL_STATE_SETTLED
class ConsoleTools: """ Some functions to make working in the console easier. """ def __init__(self, raiden_service: RaidenService) -> None: self._raiden = raiden_service self._api = RaidenAPI(raiden_service) def create_token( self, registry_address_hex: AddressHex, initial_alloc: int = 10**6, name: str = "raidentester", symbol: str = "RDT", decimals: int = 2, timeout: int = 60, auto_register: bool = True, ) -> AddressHex: """ Create a proxy for a new HumanStandardToken (ERC20), that is initialized with Args(below). Per default it will be registered with 'raiden'. Args: registry_address_hex: a hex encoded registry address. initial_alloc: amount of initial tokens. name: human readable token name. symbol: token shorthand symbol. decimals: decimal places. timeout: timeout in seconds for creation. auto_register: if True(default), automatically register the token with raiden. Returns: token_address_hex: the hex encoded address of the new token/token. """ with gevent.Timeout(timeout): contract_proxy, _ = self._raiden.rpc_client.deploy_single_contract( contract_name=CONTRACT_HUMAN_STANDARD_TOKEN, contract=self._raiden.contract_manager.get_contract( CONTRACT_HUMAN_STANDARD_TOKEN), constructor_parameters=(initial_alloc, name, decimals, symbol), ) token_address = Address( to_canonical_address(contract_proxy.address)) token_address_hex = to_hex_address(token_address) if auto_register: self.register_token(registry_address_hex, token_address_hex) print("Successfully created {}the token '{}'.".format( "and registered " if auto_register else "", name)) return token_address_hex def register_token( self, registry_address_hex: AddressHex, token_address_hex: AddressHex, retry_timeout: NetworkTimeout = DEFAULT_RETRY_TIMEOUT, ) -> TokenNetwork: """ Register a token with the raiden token manager. Args: registry_address_hex: a hex encoded registry address. token_address_hex: a hex encoded token address. retry_timeout: the retry timeout Returns: The token network proxy. """ registry_address = TokenNetworkRegistryAddress( to_canonical_address(registry_address_hex)) token_address = TokenAddress(to_canonical_address(token_address_hex)) registry = self._raiden.proxy_manager.token_network_registry( registry_address, BLOCK_ID_LATEST) token_network_address = registry.add_token( token_address=token_address, channel_participant_deposit_limit=TokenAmount(UINT256_MAX), token_network_deposit_limit=TokenAmount(UINT256_MAX), given_block_identifier=BLOCK_ID_LATEST, ) waiting.wait_for_token_network(self._raiden, registry.address, token_address, retry_timeout) return self._raiden.proxy_manager.token_network( token_network_address, BLOCK_ID_LATEST) def open_channel_with_funding( self, registry_address_hex: AddressHex, token_address_hex: AddressHex, peer_address_hex: AddressHex, total_deposit: TokenAmount, settle_timeout: BlockTimeout = None, ) -> None: """ Convenience method to open a channel. Args: registry_address_hex: hex encoded address of the registry for the channel. token_address_hex: hex encoded address of the token for the channel. peer_address_hex: hex encoded address of the channel peer. total_deposit: amount of total funding for the channel. settle_timeout: amount of blocks for the settle time (if None use app defaults). Return: netting_channel: the (newly opened) netting channel object. 
""" # Check, if peer is discoverable registry_address = TokenNetworkRegistryAddress( to_canonical_address(registry_address_hex)) peer_address = to_canonical_address(peer_address_hex) token_address = TokenAddress(to_canonical_address(token_address_hex)) self._api.channel_open(registry_address, token_address, peer_address, settle_timeout=settle_timeout) self._api.set_total_channel_deposit(registry_address, token_address, peer_address, total_deposit) def wait_for_contract(self, contract_address_hex: AddressHex, timeout: int = None) -> bool: """ Wait until a contract is mined Args: contract_address_hex: hex encoded address of the contract timeout: time to wait for the contract to get mined Returns: True if the contract got mined, false otherwise """ start_time = time.time() result = self._raiden.rpc_client.web3.eth.getCode( to_canonical_address(contract_address_hex)) current_time = time.time() while not result: if timeout and start_time + timeout > current_time: return False result = self._raiden.rpc_client.web3.eth.getCode( to_canonical_address(contract_address_hex)) gevent.sleep(0.5) current_time = time.time() return len(result) > 0
class ConsoleTools:
    def __init__(self, raiden_service, discovery, settle_timeout):
        self._chain = raiden_service.chain
        self._raiden = raiden_service
        self._api = RaidenAPI(raiden_service)
        self._discovery = discovery
        self.settle_timeout = settle_timeout

    def create_token(
            self,
            registry_address,
            initial_alloc=10 ** 6,
            name='raidentester',
            symbol='RDT',
            decimals=2,
            timeout=60,
            auto_register=True,
    ):
        """ Create a proxy for a new HumanStandardToken (ERC20), initialized
        with the arguments below. By default it will be registered with raiden.

        Args:
            registry_address: the registry the token is registered with
                (only used if auto_register is True).
            initial_alloc (int): amount of initial tokens.
            name (str): human readable token name.
            symbol (str): token shorthand symbol.
            decimals (int): decimal places.
            timeout (int): timeout in seconds for creation.
            auto_register (boolean): if True (default), automatically register
                the token with raiden.

        Returns:
            token_address_hex: the hex encoded address of the new token.
        """
        contract_path = get_contract_path('HumanStandardToken.sol')

        # Deploy a new ERC20 token
        with gevent.Timeout(timeout):
            token_proxy = self._chain.client.deploy_solidity_contract(
                'HumanStandardToken',
                compile_files_cwd([contract_path]),
                dict(),
                (initial_alloc, name, decimals, symbol),
                contract_path=contract_path,
            )

        token_address_hex = encode_hex(token_proxy.contract_address)

        if auto_register:
            self.register_token(registry_address, token_address_hex)

        print("Successfully created {}the token '{}'.".format(
            'and registered ' if auto_register else '',
            name,
        ))
        return token_address_hex

    def register_token(
            self,
            registry_address_hex: typing.AddressHex,
            token_address_hex: typing.AddressHex,
            retry_timeout: typing.NetworkTimeout = DEFAULT_RETRY_TIMEOUT,
    ) -> TokenNetwork:
        """ Register a token with the raiden token manager.

        Args:
            registry_address_hex (string): a hex encoded registry address.
            token_address_hex (string): a hex encoded token address.
            retry_timeout: the retry timeout

        Returns:
            The token network proxy.
        """
        registry_address = decode_hex(registry_address_hex)
        token_address = decode_hex(token_address_hex)

        registry = self._raiden.chain.token_network_registry(registry_address)
        token_network_address = registry.add_token(token_address)

        # Wait until the node has processed the token network registration
        waiting.wait_for_payment_network(
            self._raiden,
            registry.address,
            token_address,
            retry_timeout,
        )

        return self._raiden.chain.token_network(token_network_address)

    def open_channel_with_funding(
            self,
            registry_address_hex,
            token_address_hex,
            peer_address_hex,
            total_deposit,
            settle_timeout=None,
    ):
        """ Convenience method to open a channel.

        Args:
            registry_address_hex (str): hex encoded address of the registry for the channel.
            token_address_hex (str): hex encoded address of the token for the channel.
            peer_address_hex (str): hex encoded address of the channel peer.
            total_deposit (int): amount of total funding for the channel.
            settle_timeout (int): amount of blocks for the settle time (if None use app defaults).

        Return:
            The result of the deposit call, or None if the peer is not found in discovery.
""" # Check, if peer is discoverable registry_address = decode_hex(registry_address_hex) peer_address = decode_hex(peer_address_hex) token_address = decode_hex(token_address_hex) try: self._discovery.get(peer_address) except KeyError: print('Error: peer {} not found in discovery'.format(peer_address_hex)) return None self._api.channel_open( registry_address, token_address, peer_address, settle_timeout=settle_timeout, ) return self._api.set_total_channel_deposit( registry_address, token_address, peer_address, total_deposit, ) def wait_for_contract(self, contract_address_hex, timeout=None): """ Wait until a contract is mined Args: contract_address_hex (string): hex encoded address of the contract timeout (int): time to wait for the contract to get mined Returns: True if the contract got mined, false otherwise """ contract_address = decode_hex(contract_address_hex) start_time = time.time() result = self._raiden.chain.client.web3.eth.getCode( to_checksum_address(contract_address), ) current_time = time.time() while not result: if timeout and start_time + timeout > current_time: return False result = self._raiden.chain.client.web3.eth.getCode( to_checksum_address(contract_address), ) gevent.sleep(0.5) current_time = time.time() return len(result) > 0
def test_pfs_send_capacity_updates_on_deposit_and_withdraw(
        raiden_network: List[App],
        token_addresses: List[TokenAddress]) -> None:
    """ Test that PFSCapacityUpdate and PFSFeeUpdate messages are sent after a
    deposit and a withdraw.

    The nodes open a channel without an initial deposit; after the deposit and
    the withdraw the test checks that the expected messages were sent.
    """
    app0, app1, app2 = raiden_network
    api0 = RaidenAPI(app0.raiden)

    api0.channel_open(
        token_address=token_addresses[0],
        registry_address=app0.raiden.default_registry.address,
        partner_address=app1.raiden.address,
    )
    wait_all_apps(raiden_network)

    # There should be no messages sent at channel opening
    assert len(get_messages(app0)) == 0
    assert len(get_messages(app1)) == 0
    assert len(get_messages(app2)) == 0

    api0.set_total_channel_deposit(
        token_address=token_addresses[0],
        registry_address=app0.raiden.default_registry.address,
        partner_address=app1.raiden.address,
        total_deposit=TokenAmount(10),
    )
    wait_all_apps(raiden_network)

    # We expect a PFSCapacityUpdate and a PFSFeeUpdate after the deposit
    messages0 = get_messages(app0)
    assert len(messages0) == 2
    assert len([x for x in messages0 if isinstance(x, PFSCapacityUpdate)]) == 1
    assert len([x for x in messages0 if isinstance(x, PFSFeeUpdate)]) == 1

    # We expect the same messages for the target
    messages1 = get_messages(app1)
    assert len(messages1) == 2
    assert len([x for x in messages1 if isinstance(x, PFSCapacityUpdate)]) == 1
    assert len([x for x in messages1 if isinstance(x, PFSFeeUpdate)]) == 1

    # Unrelated node should not send updates
    assert len(get_messages(app2)) == 0

    api0.set_total_channel_withdraw(
        token_address=token_addresses[0],
        registry_address=app0.raiden.default_registry.address,
        partner_address=app1.raiden.address,
        total_withdraw=WithdrawAmount(5),
    )
    wait_all_apps(raiden_network)

    # We expect a PFSCapacityUpdate and a PFSFeeUpdate after the withdraw
    messages0 = get_messages(app0)
    assert len(messages0) == 4
    assert len([x for x in messages0 if isinstance(x, PFSCapacityUpdate)]) == 2
    assert len([x for x in messages0 if isinstance(x, PFSFeeUpdate)]) == 2

    # We expect the same messages for the target
    messages1 = get_messages(app1)
    assert len(messages1) == 4
    assert len([x for x in messages1 if isinstance(x, PFSCapacityUpdate)]) == 2
    assert len([x for x in messages1 if isinstance(x, PFSFeeUpdate)]) == 2

    # Unrelated node should not send updates
    assert len(get_messages(app2)) == 0
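# Sketch of a small helper that could make the assertions above less
# repetitive (illustrative only, not part of the test suite). It only assumes
# that get_messages() returns plain message objects whose class identifies
# their type.
from collections import Counter


def count_message_types(messages):
    """ Return a mapping of message class name -> number of occurrences. """
    return Counter(type(message).__name__ for message in messages)

# For example, after the withdraw one could assert:
#   counts = count_message_types(get_messages(app0))
#   assert counts['PFSCapacityUpdate'] == 2 and counts['PFSFeeUpdate'] == 2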