def test_receive_direct_before_deposit(raiden_network):
    """Regression test that ensures we accept incoming direct transfers, even if
    we don't have any back channel balance. """
    app0, app1, _ = raiden_network
    token_address = app0.raiden.default_registry.token_addresses()[0]
    channel_0_1 = channel(app0, app1, token_address)
    back_channel = channel(app1, app0, token_address)

    # Neither side has deposited yet, so nothing is transferable in either direction.
    assert not channel_0_1.can_transfer
    assert not back_channel.can_transfer

    deposit_amount = 2
    transfer_amount = 1
    api0 = RaidenAPI(app0.raiden)
    api0.deposit(token_address, app1.raiden.address, deposit_amount)
    # Advance the chain and let the alarm task pick up the deposit event.
    app0.raiden.chain.next_block()
    gevent.sleep(app0.raiden.alarm.wait_time)

    # Only the funded direction becomes usable; the back channel stays empty.
    assert channel_0_1.can_transfer
    assert not back_channel.can_transfer
    assert back_channel.distributable == 0

    api0.transfer_and_wait(token_address, transfer_amount, app1.raiden.address)
    gevent.sleep(app1.raiden.alarm.wait_time)

    # The received amount must be accepted and become distributable on the
    # back channel even though app1 never deposited.
    assert back_channel.can_transfer
    assert back_channel.distributable == transfer_amount
def test_get_channel_list(raiden_network, token_addresses):
    """`get_channel_list` must filter correctly by token and/or partner, and
    return every channel when called without filters.
    """
    app0, app1, app2 = raiden_network  # pylint: disable=unbalanced-tuple-unpacking

    channel0 = channel(app0, app1, token_addresses[0])
    channel1 = channel(app1, app0, token_addresses[0])
    channel2 = channel(app0, app2, token_addresses[0])

    api0 = RaidenAPI(app0.raiden)
    api1 = RaidenAPI(app1.raiden)
    api2 = RaidenAPI(app2.raiden)

    # BUG FIX: the original `assert channel0, channel2 in api0.get_channel_list()`
    # parsed as `assert channel0` with `channel2 in ...` as the assert *message*,
    # so the membership of channel2 was never actually checked.
    unfiltered_list = api0.get_channel_list()
    assert channel0 in unfiltered_list
    assert channel2 in unfiltered_list

    # Filtering by partner only, by token only, and by both must find the channel.
    assert channel0 in api0.get_channel_list(partner_address=app1.raiden.address)
    assert channel1 in api1.get_channel_list(token_address=token_addresses[0])
    assert channel1 in api1.get_channel_list(token_addresses[0], app0.raiden.address)

    # app1 has no channel with app2, so these filters must come back empty.
    assert not api1.get_channel_list(partner_address=app2.raiden.address)
    assert not api1.get_channel_list(
        token_address=token_addresses[0],
        partner_address=app2.raiden.address,
    )
    # A node address is not a token address, so no channels match.
    assert not api2.get_channel_list(
        token_address=app2.raiden.address,
    )
def test_register_token_insufficient_eth(raiden_network, token_amount, contract_manager):
    """Registering a token must raise InsufficientFunds when the node's account
    has no ETH left to pay for the registration transaction.
    """
    app1 = raiden_network[0]
    registry_address = app1.raiden.default_registry.address
    # Deploy a fresh token so it is guaranteed not to be registered yet.
    token_address = deploy_contract_web3(
        contract_name=CONTRACT_HUMAN_STANDARD_TOKEN,
        deploy_client=app1.raiden.chain.client,
        contract_manager=contract_manager,
        constructor_arguments=(
            token_amount,
            2,
            'raiden',
            'Rd',
        ),
    )

    api1 = RaidenAPI(app1.raiden)
    assert token_address not in api1.get_tokens_list(registry_address)

    # app1.raiden loses all its ETH because it has been naughty
    burn_eth(app1.raiden)

    # At this point we should get an UnrecoverableError due to InsufficientFunds
    with pytest.raises(InsufficientFunds):
        api1.token_network_register(registry_address, token_address)
def test_echo_node_response(token_addresses, raiden_chain, network_wait):
    """Every transfer sent to the echo node must be answered by a payment back
    to the initiator whose identifier equals `sent identifier + amount`.
    """
    app0, app1, app2, echo_app = raiden_chain
    address_to_app = {app.raiden.address: app for app in raiden_chain}
    token_address = token_addresses[0]
    echo_api = RaidenAPI(echo_app.raiden)

    echo_node = EchoNode(echo_api, token_address)
    echo_node.ready.wait(timeout=30)
    assert echo_node.ready.is_set()

    expected = list()

    # Create some transfers
    for num, app in enumerate([app0, app1, app2]):
        amount = 1 + num
        transfer_event = RaidenAPI(app.raiden).transfer_async(
            app.raiden.default_registry.address,
            token_address,
            amount,
            echo_app.raiden.address,
            10 ** (num + 1),
        )
        transfer_event.wait(timeout=20)
        expected.append(amount)

    # Block until the echo node reports having processed all sent transfers.
    while echo_node.num_handled_transfers < len(expected):
        gevent.sleep(.5)

    # Check that all transfers were handled correctly
    def test_events(handled_transfer):
        # Look at the payment history of the node that originally sent the
        # transfer: it must have received exactly one payment back.
        app = address_to_app[handled_transfer.initiator]
        events = RaidenAPI(app.raiden).get_raiden_events_payment_history(
            token_address=token_address,
        )

        received = {
            event.identifier: event
            for event in events
            if type(event) == EventPaymentReceivedSuccess
        }
        if len(received) != 1:
            return
        transfer = received.popitem()[1]

        # The echo reply must come from the echo node and encode the original
        # identifier plus the echoed amount in its own identifier.
        is_not_valid = (
            transfer.initiator != echo_app.raiden.address or
            transfer.identifier != handled_transfer.identifier + transfer.amount
        )

        if is_not_valid:
            return
        return transfer

    for handled_transfer in echo_node.seen_transfers:
        assert wait_until(lambda: test_events(handled_transfer), network_wait)

    echo_node.stop()
def get_channel_events_for_token(app, token_address, start_block=0):
    """Collect the events of every channel of `app` for the given
    `token_address`, starting at `start_block`.
    """
    api = RaidenAPI(app.raiden)
    all_events = []
    # Note: loop variable deliberately not named `channel` to avoid shadowing
    # the module-level `channel()` helper.
    for channel_state in api.get_channel_list(token_address=token_address):
        all_events += api.get_channel_events(channel_state.channel_address, start_block)
    return all_events
def test_deposit_updates_balance_immediately(raiden_chain, token_addresses):
    """Test that the balance of a channel gets updated by the deposit() call
    immediately and without having to wait for the `ContractReceiveBalance`
    message since the API needs to return the channel with the deposit balance
    updated"""
    app0, app1 = raiden_chain  # pylint: disable=unbalanced-tuple-unpacking
    token_address = token_addresses[0]
    deposit_amount = 10

    # Snapshot the on-chain balance before depositing.
    balance_before = channel(app0, app1, token_address).contract_balance

    updated_channel = RaidenAPI(app0.raiden).deposit(
        token_address,
        app1.raiden.address,
        deposit_amount,
    )

    # The returned channel must already reflect the new deposit.
    assert updated_channel.contract_balance == balance_before + deposit_amount
def __init__(self, raiden_service, discovery, settle_timeout, reveal_timeout):
    """Bundle a running raiden service with its discovery proxy and the
    default channel timeouts.

    :param raiden_service: the RaidenService instance to operate on
    :param discovery: discovery proxy used to resolve node endpoints
    :param settle_timeout: default settle timeout for channels opened here
    :param reveal_timeout: default reveal timeout for channels opened here
    """
    self._chain = raiden_service.chain
    self._raiden = raiden_service
    self._api = RaidenAPI(raiden_service)
    self._discovery = discovery
    self.settle_timeout = settle_timeout
    self.reveal_timeout = reveal_timeout
def __init__(self, raiden, token_network_identifier):
    """Initialize the manager for one token network.

    Resolves the token network state and its registry from the current chain
    state, and starts with zero funds / channel targets until configured.

    :param raiden: the RaidenService instance this manager belongs to
    :param token_network_identifier: identifier of the token network to manage
    """
    chain_state = views.state_from_raiden(raiden)
    token_network_state = views.get_token_network_by_identifier(
        chain_state,
        token_network_identifier,
    )
    token_network_registry = views.get_token_network_registry_by_token_network_identifier(
        chain_state,
        token_network_identifier,
    )

    # TODO:
    # - Add timeout for transaction polling, used to overwrite the RaidenAPI
    #   defaults
    # - Add a proper selection strategy (#576)
    # funds/targets start at zero; they are set later (presumably by a
    # connect/join call — not visible in this chunk).
    self.funds = 0
    self.initial_channel_target = 0
    self.joinable_funds_target = 0

    self.raiden = raiden
    self.registry_address = token_network_registry.address
    self.token_network_identifier = token_network_identifier
    self.token_address = token_network_state.token_address

    self.lock = Semaphore()  # protects self.funds and self.initial_channel_target
    self.api = RaidenAPI(raiden)
def test_token_swap(raiden_network, deposit, token_addresses):
    """A maker/taker token swap must move `maker_amount` of the maker token one
    way and `taker_amount` of the taker token the other way.
    """
    app0, app1 = raiden_network

    maker_address = app0.raiden.address
    taker_address = app1.raiden.address

    maker_token, taker_token = token_addresses[0], token_addresses[1]
    maker_amount = 70
    taker_amount = 30

    identifier = 313
    # The taker registers the expected swap before the maker initiates it.
    RaidenAPI(app1.raiden).expect_token_swap(  # pylint: disable=no-member
        identifier,
        maker_token,
        maker_amount,
        maker_address,
        taker_token,
        taker_amount,
        taker_address,
    )

    async_result = RaidenAPI(app0.raiden).token_swap_async(  # pylint: disable=no-member
        identifier,
        maker_token,
        maker_amount,
        maker_address,
        taker_token,
        taker_amount,
        taker_address,
    )

    assert async_result.wait()

    # wait for the taker to receive and process the messages
    gevent.sleep(0.5)

    # Maker token moved from app0 to app1...
    assert_synced_channel_state(
        maker_token,
        app0, deposit - maker_amount, [],
        app1, deposit + maker_amount, [],
    )

    # ...and taker token moved from app1 to app0.
    assert_synced_channel_state(
        taker_token,
        app0, deposit + taker_amount, [],
        app1, deposit - taker_amount, [],
    )
def test_set_deposit_limit_crash(raiden_network, token_amount, contract_manager, retry_timeout):
    """The development contracts as of 10/12/2018 were crashing if more than an amount was given

    Regression test for https://github.com/raiden-network/raiden/issues/3135
    """
    app1 = raiden_network[0]
    registry_address = app1.raiden.default_registry.address
    # Deploy a fresh, unregistered token.
    token_address = deploy_contract_web3(
        contract_name=CONTRACT_HUMAN_STANDARD_TOKEN,
        deploy_client=app1.raiden.chain.client,
        contract_manager=contract_manager,
        constructor_arguments=(
            token_amount,
            2,
            'raiden',
            'Rd',
        ),
    )

    api1 = RaidenAPI(app1.raiden)
    assert token_address not in api1.get_tokens_list(registry_address)

    api1.token_network_register(registry_address, token_address)
    exception = RuntimeError('Did not see the token registration within 30 seconds')
    # Wait until the node processes its own registration event.
    with gevent.Timeout(seconds=30, exception=exception):
        wait_for_state_change(
            app1.raiden,
            ContractReceiveNewTokenNetwork,
            {
                'token_network': {
                    'token_address': token_address,
                },
            },
            retry_timeout,
        )

    assert token_address in api1.get_tokens_list(registry_address)

    # The partner does not need to exist; the deposit limit is checked locally.
    partner_address = make_address()
    api1.channel_open(
        registry_address=app1.raiden.default_registry.address,
        token_address=token_address,
        partner_address=partner_address,
    )

    # A deposit over the contract limit must raise instead of crashing.
    with pytest.raises(DepositOverLimit):
        api1.set_total_channel_deposit(
            registry_address=app1.raiden.default_registry.address,
            token_address=token_address,
            partner_address=partner_address,
            total_deposit=10000000000000000000000,
        )
def test_token_swap(raiden_network, deposit, settle_timeout):
    """Older variant of the token-swap test: swap two tokens between app0
    (maker) and app1 (taker) and verify both channel balances moved.
    """
    app0, app1 = raiden_network

    maker_address = app0.raiden.address
    taker_address = app1.raiden.address

    # Pick the first two tokens both nodes have channel graphs for.
    maker_token, taker_token = list(app0.raiden.token_to_channelgraph.keys())[:2]
    maker_amount = 70
    taker_amount = 30

    identifier = 313
    # The taker registers the expected swap before the maker initiates it.
    RaidenAPI(app1.raiden).expect_token_swap(
        identifier,
        maker_token,
        maker_amount,
        maker_address,
        taker_token,
        taker_amount,
        taker_address,
    )

    async_result = RaidenAPI(app0.raiden).token_swap_async(
        identifier,
        maker_token,
        maker_amount,
        maker_address,
        taker_token,
        taker_amount,
        taker_address,
    )

    assert async_result.wait()

    # wait for the taker to receive and process the messages
    gevent.sleep(0.5)

    # Maker token moved from app0 to app1...
    assert_synched_channels(
        channel(app0, app1, maker_token), deposit - maker_amount, [],
        channel(app1, app0, maker_token), deposit + maker_amount, [],
    )

    # ...and taker token moved from app1 to app0.
    assert_synched_channels(
        channel(app0, app1, taker_token), deposit + taker_amount, [],
        channel(app1, app0, taker_token), deposit - taker_amount, [],
    )
def test_echo_node_response(token_addresses, raiden_chain):
    """Older variant: transfers sent to the echo node must each produce exactly
    one `EventTransferReceivedSuccess` back at the initiator, with
    `identifier == sent identifier + amount`.
    """
    app0, app1, app2, echo_app = raiden_chain
    address_to_app = {app.raiden.address: app for app in raiden_chain}
    token_address = token_addresses[0]
    echo_api = RaidenAPI(echo_app.raiden)

    echo_node = EchoNode(echo_api, token_address)
    echo_node.ready.wait(timeout=30)
    assert echo_node.ready.is_set()

    expected = list()

    # Create some transfers
    for num, app in enumerate([app0, app1, app2]):
        amount = 1 + num
        transfer_event = RaidenAPI(app.raiden).transfer_async(
            token_address,
            amount,
            echo_app.raiden.address,
            10 ** (num + 1)
        )
        transfer_event.wait(timeout=20)
        expected.append(amount)

    # Block until the echo node processed every sent transfer.
    while echo_node.num_handled_transfers < len(expected):
        gevent.sleep(.5)

    # Check that all transfers were handled correctly
    for handled_transfer in echo_node.seen_transfers:
        app = address_to_app[handled_transfer['initiator']]
        events = get_channel_events_for_token(app, token_address, 0)
        received = {}
        for event in events:
            if event['_event_type'] == 'EventTransferReceivedSuccess':
                received[repr(event)] = event

        # Exactly one echo payment per initiator.
        assert len(received) == 1
        transfer = list(received.values())[0]
        assert transfer['initiator'] == echo_app.raiden.address
        assert transfer['identifier'] == (
            handled_transfer['identifier'] + transfer['amount']
        )
    echo_node.stop()
def test_snapshotting(raiden_network, token_addresses):
    """After stopping the nodes, the serialized snapshot on disk must match the
    in-memory channel, queue, ack, nonce and transfer state of each node.
    """
    app0, app1, app2 = raiden_network

    api0 = RaidenAPI(app0.raiden)
    api1 = RaidenAPI(app1.raiden)

    channel_0_1 = api0.get_channel_list(token_addresses[0], app1.raiden.address)
    channel_0_2 = api0.get_channel_list(token_addresses[0], app2.raiden.address)

    # app1 and app2 have no direct channel; app0 has exactly one with each.
    assert not api1.get_channel_list(token_addresses[0], app2.raiden.address)
    assert len(channel_0_1) == 1
    assert len(channel_0_2) == 1

    # Produce some state worth snapshotting (a mediated transfer app1 -> app2).
    api1.transfer_and_wait(token_addresses[0], 5, app2.raiden.address)

    # Stopping triggers serialization of the node state.
    app0.stop()
    app1.stop()
    app2.stop()

    for app in [app0, app1, app2]:
        data = load_snapshot(app.raiden.serialization_file)

        # Every serialized channel must round-trip against the live channel.
        for serialized_channel in data['channels']:
            network = app.raiden.token_to_channelgraph[serialized_channel.token_address]
            running_channel = network.address_to_channel[serialized_channel.channel_address]
            assert running_channel.serialize() == serialized_channel

        # Message queues are keyed by (receiver, token).
        for queue in data['queues']:
            key = (queue['receiver_address'], queue['token_address'])
            assert app.raiden.protocol.channel_queue[key].copy() == queue['messages']

        assert data['receivedhashes_to_acks'] == app.raiden.protocol.receivedhashes_to_acks
        assert data['nodeaddresses_to_nonces'] == app.raiden.protocol.nodeaddresses_to_nonces
        assert data['transfers'] == app.raiden.identifier_to_statemanagers
def test_event_transfer_received_success(token_addresses, raiden_chain):
    """The receiver node must emit one `EventTransferReceivedSuccess` per
    incoming transfer, with the correct initiator and amount.
    """
    app0, app1, app2, receiver_app = raiden_chain
    token_address = token_addresses[0]

    # Only look at events produced from this block on.
    start_block = receiver_app.raiden.get_block_number()

    expected = dict()
    # Each of the three senders transfers a distinct amount (1, 2, 3).
    for num, app in enumerate([app0, app1, app2]):
        amount = 1 + num
        transfer_event = RaidenAPI(app.raiden).transfer_async(
            token_address,
            amount,
            receiver_app.raiden.address,
        )
        transfer_event.wait(timeout=20)
        expected[app.raiden.address] = amount

    events = get_channel_events_for_token(
        receiver_app,
        token_address,
        start_block,
    )

    transfer_initiators = list()
    events_received = list()
    for event in events:
        if event['_event_type'] == 'EventTransferReceivedSuccess':
            events_received.append(event)
            transfer_initiators.append(event['initiator'])
            # Amount must match what that initiator sent.
            assert expected[event['initiator']] == event['amount']

    assert len(events_received) == len(expected), '# of events must be equal to # of transfers'

    # Every sender (and only the senders) must appear as an initiator.
    without_receiver_app = [
        app0.raiden.address,
        app1.raiden.address,
        app2.raiden.address,
    ]
    assert set(without_receiver_app) == set(transfer_initiators)
def test_second_manager_address_if_token_registered(raiden_chain, token_addresses):
    """Test recreating the scenario described on issue:
    https://github.com/raiden-network/raiden/issues/784"""
    app0, app1 = raiden_chain  # pylint: disable=unbalanced-tuple-unpacking

    api0 = RaidenAPI(app0.raiden)
    api1 = RaidenAPI(app1.raiden)

    # Recreate the race condition by making sure the non-registering app won't
    # register at all by watching for the TokenAdded blockchain event.
    app1.raiden.alarm.remove_callback(app1.raiden.poll_blockchain_events)

    manager_0token = api0.register_token(token_addresses[0])

    # The second node does not register but just confirms token is registered.
    # This is the behaviour the api call implement in register_token().
    manager_1token = api1.manager_address_if_token_registered(token_addresses[0])

    # Both nodes must agree on the same manager address.
    assert manager_0token == manager_1token

    # Now make sure the token lists are populated for both nodes
    tokens0_list = api0.get_tokens_list()
    tokens1_list = api1.get_tokens_list()
    assert tokens0_list == tokens1_list
    assert len(tokens1_list) == 1
    assert token_addresses[0] == tokens1_list[0]
def test_deposit_updates_balance_immediately(raiden_chain, token_addresses):
    """ Test that the balance of a channel gets updated by the deposit() call
    immediately and without having to wait for the
    `ContractReceiveChannelNewBalance` message since the API needs to return
    the channel with the deposit balance updated.
    """
    app0, app1 = raiden_chain
    registry_address = app0.raiden.default_registry.address
    token_address = token_addresses[0]
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        views.state_from_app(app0),
        app0.raiden.default_registry.address,
        token_address,
    )

    api0 = RaidenAPI(app0.raiden)

    old_state = get_channelstate(app0, app1, token_network_identifier)
    # NOTE(review): total_deposit=210 with an asserted delta of 10 assumes the
    # fixture pre-deposited 200 into this channel — confirm against the
    # `deposit` fixture value used by this test module.
    api0.set_total_channel_deposit(registry_address, token_address, app1.raiden.address, 210)
    new_state = get_channelstate(app0, app1, token_network_identifier)

    assert new_state.our_state.contract_balance == old_state.our_state.contract_balance + 10
def test_event_transfer_received_success(
        token_addresses,
        raiden_chain,
        network_wait,
):
    """Newer variant: the receiver's WAL must contain one
    `EventPaymentReceivedSuccess` per sender with the matching amount.
    """
    app0, app1, app2, receiver_app = raiden_chain
    token_address = token_addresses[0]

    expected = dict()
    # Each of the three senders transfers a distinct amount (1, 2, 3).
    for num, app in enumerate([app0, app1, app2]):
        amount = 1 + num
        transfer_event = RaidenAPI(app.raiden).transfer_async(
            app.raiden.default_registry.address,
            token_address,
            amount,
            receiver_app.raiden.address,
        )
        transfer_event.wait(timeout=20)
        expected[app.raiden.address] = amount

    # sleep is for the receiver's node to have time to process all events
    gevent.sleep(1)

    def test_events(amount, address):
        # Truthy iff the receiver's WAL already holds the matching event.
        return must_contain_entry(
            receiver_app.raiden.wal.storage.get_events(),
            EventPaymentReceivedSuccess,
            {'amount': amount, 'initiator': address},
        )

    amounts = [1, 2, 3]
    addrs = [app0.raiden.address, app1.raiden.address, app2.raiden.address]
    for amount, address in zip(amounts, addrs):
        assert wait_until(
            lambda: test_events(amount, address),
            network_wait,
        )
def test_register_token(raiden_chain, token_addresses):
    """A token can be registered exactly once; re-registration raises ValueError."""
    app0, _ = raiden_chain  # pylint: disable=unbalanced-tuple-unpacking
    api = RaidenAPI(app0.raiden)
    token = token_addresses[0]

    # The token is not registered yet.
    assert api.manager_address_if_token_registered(token) is None

    manager_address = api.register_token(token)
    assert api.manager_address_if_token_registered(token) == manager_address

    # Registering the same token a second time must fail.
    with pytest.raises(ValueError):
        api.register_token(token)
def test_close_regression(raiden_network, deposit, token_addresses):
    """ The python api was using the wrong balance proof to close the channel,
    thus the close was failing if a transfer was made.

    Makes a transfer app0 -> app1, closes from app1's side, waits for
    settlement, and checks both on-chain token balances.
    """
    app0, app1 = raiden_network
    # CLEANUP: registry_address was assigned twice with the identical value;
    # the redundant second assignment has been removed.
    registry_address = app0.raiden.default_registry.address
    token_address = token_addresses[0]

    api1 = RaidenAPI(app0.raiden)
    api2 = RaidenAPI(app1.raiden)

    channel_list = api1.get_channel_list(registry_address, token_address, app1.raiden.address)
    channel12 = channel_list[0]

    token_proxy = app0.raiden.chain.token(token_address)
    node1_balance_before = token_proxy.balance_of(api1.address)
    node2_balance_before = token_proxy.balance_of(api2.address)

    # Initialize app2 balance proof and close the channel
    amount = 10
    identifier = 42
    assert api1.transfer(
        registry_address,
        token_address,
        amount,
        api2.address,
        identifier=identifier,
    )

    exception = ValueError('Waiting for transfer received success in the WAL timed out')
    with gevent.Timeout(seconds=5, exception=exception):
        waiting.wait_for_transfer_success(
            app1.raiden,
            identifier,
            amount,
            app1.raiden.alarm.sleep_time,
        )

    # Closing from the receiving side exercises the balance-proof regression.
    api2.channel_close(registry_address, token_address, api1.address)
    waiting.wait_for_settle(
        app0.raiden,
        app0.raiden.default_registry.address,
        token_address,
        [channel12.identifier],
        app0.raiden.alarm.sleep_time,
    )

    # After settlement each node gets its deposit back, adjusted by the transfer.
    node1_expected_balance = node1_balance_before + deposit - amount
    node2_expected_balance = node2_balance_before + deposit + amount
    assert token_proxy.balance_of(api1.address) == node1_expected_balance
    assert token_proxy.balance_of(api2.address) == node2_expected_balance
def test_register_token(raiden_network, token_amount, contract_manager, retry_timeout):
    """Registering a freshly deployed token must make it appear in the token
    list, and re-registering it must raise AlreadyRegisteredTokenAddress.
    """
    app1 = raiden_network[0]
    registry_address = app1.raiden.default_registry.address
    # Deploy a fresh, unregistered token.
    token_address = deploy_contract_web3(
        contract_name=CONTRACT_HUMAN_STANDARD_TOKEN,
        deploy_client=app1.raiden.chain.client,
        contract_manager=contract_manager,
        constructor_arguments=(
            token_amount,
            2,
            'raiden',
            'Rd',
        ),
    )

    api1 = RaidenAPI(app1.raiden)
    assert token_address not in api1.get_tokens_list(registry_address)

    api1.token_network_register(registry_address, token_address)
    exception = RuntimeError('Did not see the token registration within 30 seconds')
    # Wait until the node processes its own registration event.
    with gevent.Timeout(seconds=30, exception=exception):
        wait_for_state_change(
            app1.raiden,
            ContractReceiveNewTokenNetwork,
            {
                'token_network': {
                    'token_address': token_address,
                },
            },
            retry_timeout,
        )

    assert token_address in api1.get_tokens_list(registry_address)

    # Exception if we try to reregister
    with pytest.raises(AlreadyRegisteredTokenAddress):
        api1.token_network_register(registry_address, token_address)
def test_echo_node_lottery(token_addresses, raiden_chain, network_wait):
    """Seven participants pay into the echo-node lottery pool; the pool-size
    query must be answered and exactly one winning payout must be produced
    once the pool fills.
    """
    app0, app1, app2, app3, echo_app, app4, app5, app6 = raiden_chain
    address_to_app = {app.raiden.address: app for app in raiden_chain}
    token_address = token_addresses[0]
    echo_api = RaidenAPI(echo_app.raiden)

    echo_node = EchoNode(echo_api, token_address)
    echo_node.ready.wait(timeout=30)
    assert echo_node.ready.is_set()

    expected = list()

    # Let 6 participants enter the pool
    amount = 7
    for num, app in enumerate([app0, app1, app2, app3, app4, app5]):
        transfer_event = RaidenAPI(app.raiden).transfer_async(
            app.raiden.default_registry.address,
            token_address,
            amount,
            echo_app.raiden.address,
            10 ** (num + 1),
        )
        transfer_event.wait(timeout=20)
        expected.append(amount)

    # BUG FIX: the transfers below previously read the registry address from
    # the stale loop variable `app` (left pointing at app5) instead of the
    # sending node; use each sender's own default registry. The useless
    # binding of `.wait()`'s None result to `transfer_event` is also dropped.

    # test duplicated identifier + amount is ignored
    RaidenAPI(app5.raiden).transfer_async(
        app5.raiden.default_registry.address,
        token_address,
        amount,  # same amount as before
        echo_app.raiden.address,
        10 ** 6,  # app5 used this identifier before
    ).wait(timeout=20)

    # test pool size querying
    pool_query_identifier = 77  # unused identifier different from previous one
    RaidenAPI(app5.raiden).transfer_async(
        app5.raiden.default_registry.address,
        token_address,
        amount,
        echo_app.raiden.address,
        pool_query_identifier,
    ).wait(timeout=20)
    expected.append(amount)

    # fill the pool
    RaidenAPI(app6.raiden).transfer_async(
        app6.raiden.default_registry.address,
        token_address,
        amount,
        echo_app.raiden.address,
        10 ** 7,
    ).wait(timeout=20)
    expected.append(amount)

    # Block until the echo node processed every non-ignored transfer.
    while echo_node.num_handled_transfers < len(expected):
        gevent.sleep(.5)

    def get_echoed_transfer(sent_transfer):
        """For a given transfer sent to echo node, get the corresponding echoed transfer"""
        app = address_to_app[sent_transfer.initiator]
        events = RaidenAPI(app.raiden).get_raiden_events_payment_history(
            token_address=token_address,
        )

        def is_valid(event):
            return (
                type(event) == EventPaymentReceivedSuccess and
                event.initiator == echo_app.raiden.address and
                event.identifier == sent_transfer.identifier + event.amount
            )

        received = {
            event.identifier: event
            for event in events
            if is_valid(event)
        }
        if len(received) != 1:
            return
        return received.popitem()[1]

    def received_is_of_size(size):
        """Return transfers received from echo_node when there's size transfers"""
        received = {}
        # Check that payout was generated and pool_size_query answered
        for handled_transfer in echo_node.seen_transfers:
            event = get_echoed_transfer(handled_transfer)
            if not event:
                continue
            received[event.identifier] = event

        if len(received) == size:
            return received

    # wait for the expected echoed transfers to be handled
    received = wait_until(lambda: received_is_of_size(2), 2 * network_wait)
    assert received

    received = sorted(received.values(), key=lambda transfer: transfer.amount)

    # The smaller echo is the pool-size answer, the larger one the payout.
    pool_query = received[0]
    assert pool_query.amount == 6
    assert pool_query.identifier == pool_query_identifier + 6

    winning_transfer = received[1]
    assert winning_transfer.initiator == echo_app.raiden.address
    assert winning_transfer.amount == 49
    assert (winning_transfer.identifier - 49) % 10 == 0

    echo_node.stop()
def test_query_events(
        raiden_chain,
        token_addresses,
        deposit,
        settle_timeout,
        retry_timeout,
        contract_manager,
):
    """Walk a channel through open -> deposit -> close -> settle and verify
    that each stage is visible through the on-chain event query helpers, and
    that out-of-range block filters return nothing.
    """
    app0, app1 = raiden_chain  # pylint: disable=unbalanced-tuple-unpacking
    registry_address = app0.raiden.default_registry.address
    token_address = token_addresses[0]
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        views.state_from_app(app0),
        registry_address,
        token_address,
    )

    token_network_address = app0.raiden.default_registry.get_token_network(token_address)

    manager0 = app0.raiden.chain.token_network(token_network_address)

    channelcount0 = views.total_token_network_channels(
        views.state_from_app(app0),
        registry_address,
        token_address,
    )

    events = get_token_network_registry_events(
        chain=app0.raiden.chain,
        token_network_registry_address=registry_address,
        contract_manager=contract_manager,
        events=ALL_EVENTS,
    )

    # The registry must have emitted the token-network-created event.
    assert must_have_event(
        events,
        {
            'event': EVENT_TOKEN_NETWORK_CREATED,
            'args': {
                'token_network_address': to_checksum_address(manager0.address),
                'token_address': to_checksum_address(token_address),
            },
        },
    )

    # A block range far in the future must yield no events.
    events = get_token_network_registry_events(
        chain=app0.raiden.chain,
        token_network_registry_address=app0.raiden.default_registry.address,
        contract_manager=contract_manager,
        events=ALL_EVENTS,
        from_block=999999998,
        to_block=999999999,
    )
    assert not events

    RaidenAPI(app0.raiden).channel_open(
        registry_address,
        token_address,
        app1.raiden.address,
    )

    wait_both_channel_open(app0, app1, registry_address, token_address, retry_timeout)

    events = get_token_network_events(
        chain=app0.raiden.chain,
        token_network_address=manager0.address,
        contract_manager=contract_manager,
        events=ALL_EVENTS,
    )

    _event = must_have_event(
        events,
        {
            'event': ChannelEvent.OPENED,
            'args': {
                'participant1': to_checksum_address(app0.raiden.address),
                'participant2': to_checksum_address(app1.raiden.address),
                'settle_timeout': settle_timeout,
            },
        },
    )
    assert _event
    # Remember the channel id for the per-channel event queries below.
    channel_id = _event['args']['channel_identifier']

    # Again, a future block range must be empty.
    events = get_token_network_events(
        chain=app0.raiden.chain,
        token_network_address=manager0.address,
        contract_manager=contract_manager,
        events=ALL_EVENTS,
        from_block=999999998,
        to_block=999999999,
    )
    assert not events

    # channel is created but not opened and without funds
    channelcount1 = views.total_token_network_channels(
        views.state_from_app(app0),
        registry_address,
        token_address,
    )
    assert channelcount0 + 1 == channelcount1

    assert_synced_channel_state(
        token_network_identifier,
        app0, 0, [],
        app1, 0, [],
    )

    RaidenAPI(app0.raiden).set_total_channel_deposit(
        registry_address,
        token_address,
        app1.raiden.address,
        deposit,
    )

    all_netting_channel_events = get_all_netting_channel_events(
        chain=app0.raiden.chain,
        token_network_address=token_network_identifier,
        netting_channel_identifier=channel_id,
        contract_manager=app0.raiden.contract_manager,
    )

    deposit_events = get_netting_channel_deposit_events(
        chain=app0.raiden.chain,
        token_network_address=token_network_identifier,
        netting_channel_identifier=channel_id,
        contract_manager=contract_manager,
    )

    # The deposit must show up both in the filtered and the aggregate queries.
    total_deposit_event = {
        'event': ChannelEvent.DEPOSIT,
        'args': {
            'participant': to_checksum_address(app0.raiden.address),
            'total_deposit': deposit,
            'channel_identifier': channel_id,
        },
    }
    assert must_have_event(deposit_events, total_deposit_event)
    assert must_have_event(all_netting_channel_events, total_deposit_event)

    RaidenAPI(app0.raiden).channel_close(
        registry_address,
        token_address,
        app1.raiden.address,
    )

    all_netting_channel_events = get_all_netting_channel_events(
        chain=app0.raiden.chain,
        token_network_address=token_network_identifier,
        netting_channel_identifier=channel_id,
        contract_manager=app0.raiden.contract_manager,
    )

    closed_events = get_netting_channel_closed_events(
        chain=app0.raiden.chain,
        token_network_address=token_network_identifier,
        netting_channel_identifier=channel_id,
        contract_manager=contract_manager,
    )

    closed_event = {
        'event': ChannelEvent.CLOSED,
        'args': {
            'channel_identifier': channel_id,
            'closing_participant': to_checksum_address(app0.raiden.address),
        },
    }
    assert must_have_event(closed_events, closed_event)
    assert must_have_event(all_netting_channel_events, closed_event)

    # Mine past the settlement window so the settle() call can happen.
    settle_expiration = app0.raiden.chain.block_number() + settle_timeout + 5
    wait_until_block(app0.raiden.chain, settle_expiration)

    all_netting_channel_events = get_all_netting_channel_events(
        chain=app0.raiden.chain,
        token_network_address=token_network_identifier,
        netting_channel_identifier=channel_id,
        contract_manager=app0.raiden.contract_manager,
    )

    settled_events = get_netting_channel_settled_events(
        chain=app0.raiden.chain,
        token_network_address=token_network_identifier,
        netting_channel_identifier=channel_id,
        contract_manager=contract_manager,
    )

    settled_event = {
        'event': ChannelEvent.SETTLED,
        'args': {
            'channel_identifier': channel_id,
        },
    }
    assert must_have_event(settled_events, settled_event)
    assert must_have_event(all_netting_channel_events, settled_event)
def run(
        privatekey,
        registry_contract_address,
        discovery_contract_address,
        listen_address,
        logging,
        logfile,
        scenario,
        stage_prefix,
        results_filename):  # pylint: disable=unused-argument
    """Scenario-runner entry point: boot a raiden App, optionally execute a
    JSON scenario (create/register tokens, open and fund channels, run timed
    transfers) and write timing results to `results_filename`.

    Stage files `<stage_prefix>.stage1/2/3` are touched as progress markers and
    SIGUSR2 is used as the external "continue" signal between stages.
    """
    # TODO: only enabled logging on "initiators"
    slogging.configure(logging, log_file=logfile)

    (listen_host, listen_port) = split_endpoint(listen_address)

    config = App.DEFAULT_CONFIG.copy()
    config['host'] = listen_host
    config['port'] = listen_port
    config['privatekey_hex'] = privatekey

    privatekey_bin = decode_hex(privatekey)

    # Hard-coded local node; this tool assumes an eth client on 127.0.0.1:8545.
    rpc_client = JSONRPCClient(
        '127.0.0.1',
        8545,
        privatekey_bin,
    )

    blockchain_service = BlockChainService(
        privatekey_bin,
        rpc_client,
        GAS_LIMIT,
        GAS_PRICE,
    )

    discovery = ContractDiscovery(
        blockchain_service,
        decode_hex(discovery_contract_address)
    )

    registry = blockchain_service.registry(
        registry_contract_address
    )

    app = App(
        config,
        blockchain_service,
        registry,
        discovery,
    )

    app.discovery.register(
        app.raiden.address,
        listen_host,
        listen_port,
    )

    app.raiden.register_registry(app.raiden.default_registry.address)

    if scenario:
        script = json.load(scenario)

        tools = ConsoleTools(
            app.raiden,
            app.discovery,
            app.config['settle_timeout'],
            app.config['reveal_timeout'],
        )

        transfers_by_peer = {}

        tokens = script['tokens']
        token_address = None
        peer = None
        our_node = hexlify(app.raiden.address)
        log.warning("our address is {}".format(our_node))
        for token in tokens:
            # skip tokens that we're not part of
            nodes = token['channels']
            if our_node not in nodes:
                continue

            partner_nodes = [
                node
                for node in nodes
                if node != our_node
            ]

            # allow for prefunded tokens
            if 'token_address' in token:
                token_address = token['token_address']
            else:
                token_address = tools.create_token()

            transfers_with_amount = token['transfers_with_amount']

            # FIXME: in order to do bidirectional channels, only one side
            # (i.e. only token['channels'][0]) should
            # open; others should join by calling
            # raiden.api.deposit, AFTER the channel came alive!

            # NOTE: leaving unidirectional for now because it most
            # probably will get to higher throughput

            log.warning("Waiting for all nodes to come online")

            api = RaidenAPI(app.raiden)

            for node in partner_nodes:
                api.start_health_check_for(node)

            # Poll until every partner is reported reachable.
            while True:
                all_reachable = all(
                    api.get_node_network_state(node) == NODE_NETWORK_REACHABLE
                    for node in partner_nodes
                )
                if all_reachable:
                    break
                gevent.sleep(5)

            log.warning("All nodes are online")

            if our_node != nodes[-1]:
                # Every node (except the last) opens a channel to its successor
                # in the list, forming a unidirectional chain.
                our_index = nodes.index(our_node)
                peer = nodes[our_index + 1]

                tools.register_token(token_address)
                amount = transfers_with_amount[nodes[-1]]

                # Best-effort retry loops: discovery lookup, channel open and
                # deposit can all fail transiently while peers come up; retry
                # with a randomized backoff.
                while True:
                    try:
                        app.discovery.get(peer.decode('hex'))
                        break
                    except KeyError:
                        log.warning("Error: peer {} not found in discovery".format(peer))
                        time.sleep(random.randrange(30))

                while True:
                    try:
                        log.warning("Opening channel with {} for {}".format(peer, token_address))
                        api.open(token_address, peer)
                        break
                    except KeyError:
                        log.warning("Error: could not open channel with {}".format(peer))
                        time.sleep(random.randrange(30))

                while True:
                    try:
                        log.warning("Funding channel with {} for {}".format(peer, token_address))
                        api.deposit(token_address, peer, amount)
                        break
                    except Exception:
                        log.warning("Error: could not deposit {} for {}".format(amount, peer))
                        time.sleep(random.randrange(30))

                # Only the first node in the chain acts as transfer initiator.
                if our_index == 0:
                    last_node = nodes[-1]
                    transfers_by_peer[last_node] = int(amount)
            else:
                # The last node has no successor; its stats peer is its
                # predecessor.
                peer = nodes[-2]

        if stage_prefix is not None:
            open('{}.stage1'.format(stage_prefix), 'a').close()
            log.warning("Done with initialization, waiting to continue...")
            event = gevent.event.Event()
            gevent.signal(signal.SIGUSR2, event.set)
            event.wait()

        transfer_results = {'total_time': 0, 'timestamps': []}

        def transfer(token_address, amount_per_transfer, total_transfers, peer, is_async):
            # Run `total_transfers` sequential transfers to `peer`, recording
            # per-transfer timestamps into the shared transfer_results dict.
            def transfer_():
                log.warning("Making {} transfers to {}".format(total_transfers, peer))
                initial_time = time.time()
                times = [0] * total_transfers
                for index in range(total_transfers):
                    RaidenAPI(app.raiden).transfer(
                        token_address.decode('hex'),
                        amount_per_transfer,
                        peer,
                    )
                    times[index] = time.time()

                transfer_results['total_time'] = time.time() - initial_time
                transfer_results['timestamps'] = times

                log.warning("Making {} transfers took {}".format(
                    total_transfers, transfer_results['total_time']))
                log.warning("Times: {}".format(times))

            if is_async:
                return gevent.spawn(transfer_)
            else:
                transfer_()

        # If sending to multiple targets, do it asynchronously, otherwise
        # keep it simple and just send to the single target on my thread.
        if len(transfers_by_peer) > 1:
            greenlets = []
            for peer_, amount in transfers_by_peer.items():
                greenlet = transfer(token_address, 1, amount, peer_, True)
                if greenlet is not None:
                    greenlets.append(greenlet)
            gevent.joinall(greenlets)

        elif len(transfers_by_peer) == 1:
            for peer_, amount in transfers_by_peer.items():
                transfer(token_address, 1, amount, peer_, False)

        log.warning("Waiting for termination")

        open('{}.stage2'.format(stage_prefix), 'a').close()
        log.warning("Waiting for transfers to finish, will write results...")
        event = gevent.event.Event()
        gevent.signal(signal.SIGUSR2, event.set)
        event.wait()

        results = tools.channel_stats_for(token_address, peer)
        if transfer_results['total_time'] != 0:
            results['total_time'] = transfer_results['total_time']
        if len(transfer_results['timestamps']) > 0:
            results['timestamps'] = transfer_results['timestamps']
        results['channel'] = repr(results['channel'])  # FIXME

        log.warning("Results: {}".format(results))

        with open(results_filename, 'w') as fp:
            json.dump(results, fp, indent=2)

        open('{}.stage3'.format(stage_prefix), 'a').close()
        # Block until told to shut down.
        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    else:
        log.warning("No scenario file supplied, doing nothing!")

        open('{}.stage2'.format(stage_prefix), 'a').close()
        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    app.stop()
def test_channel_lifecycle(raiden_network, token_addresses, deposit, transport_config):
    """Walk one channel through its full lifecycle and assert node state at
    each step: open -> deposit (idempotent) -> close -> settle, checking the
    corresponding blockchain events and health-check transitions along the way.
    """
    node1, node2 = raiden_network
    token_address = token_addresses[0]
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        views.state_from_app(node1),
        node1.raiden.default_registry.address,
        token_address,
    )

    api1 = RaidenAPI(node1.raiden)
    api2 = RaidenAPI(node2.raiden)

    registry_address = node1.raiden.default_registry.address

    # nodes don't have a channel, so they are not healthchecking
    assert api1.get_node_network_state(api2.address) == NODE_NETWORK_UNKNOWN
    assert api2.get_node_network_state(api1.address) == NODE_NETWORK_UNKNOWN
    assert not api1.get_channel_list(registry_address, token_address, api2.address)

    # open is a synchronous api
    api1.channel_open(node1.raiden.default_registry.address, token_address, api2.address)
    channels = api1.get_channel_list(registry_address, token_address, api2.address)
    assert len(channels) == 1

    channel12 = get_channelstate(node1, node2, token_network_identifier)
    assert channel.get_status(channel12) == CHANNEL_STATE_OPENED

    # The open must have produced a ChannelOpened event naming both participants.
    event_list1 = api1.get_channel_events(
        token_address,
        channel12.partner_state.address,
        channel12.open_transaction.finished_block_number,
    )
    assert any(
        (
            event['event'] == EVENT_CHANNEL_OPENED and
            is_same_address(
                event['args']['participant1'],
                to_normalized_address(api1.address),
            ) and
            is_same_address(
                event['args']['participant2'],
                to_normalized_address(api2.address),
            )
        )
        for event in event_list1
    )

    token_events = api1.get_token_network_events(
        token_address,
        channel12.open_transaction.finished_block_number,
    )
    assert token_events[0]['event'] == EVENT_CHANNEL_OPENED

    registry_address = api1.raiden.default_registry.address
    # Load the new state with the deposit
    api1.set_total_channel_deposit(
        registry_address,
        token_address,
        api2.address,
        deposit,
    )

    # let's make sure it's idempotent
    api1.set_total_channel_deposit(
        registry_address,
        token_address,
        api2.address,
        deposit,
    )
    channel12 = get_channelstate(node1, node2, token_network_identifier)

    assert channel.get_status(channel12) == CHANNEL_STATE_OPENED
    assert channel.get_balance(channel12.our_state, channel12.partner_state) == deposit
    assert channel12.our_state.contract_balance == deposit
    assert api1.get_channel_list(registry_address, token_address, api2.address) == [channel12]

    # there is a channel open, they must be healthchecking each other
    assert api1.get_node_network_state(api2.address) == NODE_NETWORK_REACHABLE
    assert api2.get_node_network_state(api1.address) == NODE_NETWORK_REACHABLE

    # The (total) deposit must show up as a ChannelDeposit event for node1.
    event_list2 = api1.get_channel_events(
        token_address,
        channel12.partner_state.address,
        channel12.open_transaction.finished_block_number,
    )
    assert any(
        (
            event['event'] == EVENT_CHANNEL_DEPOSIT and
            is_same_address(
                event['args']['participant'],
                to_normalized_address(api1.address),
            ) and
            event['args']['total_deposit'] == deposit
        )
        for event in event_list2
    )

    api1.channel_close(registry_address, token_address, api2.address)

    # Load the new state with the channel closed
    channel12 = get_channelstate(node1, node2, token_network_identifier)

    event_list3 = api1.get_channel_events(
        token_address,
        channel12.partner_state.address,
        channel12.open_transaction.finished_block_number,
    )
    assert len(event_list3) > len(event_list2)
    assert any(
        (
            event['event'] == EVENT_CHANNEL_CLOSED and
            is_same_address(
                event['args']['closing_participant'],
                to_normalized_address(api1.address),
            )
        )
        for event in event_list3
    )
    assert channel.get_status(channel12) == CHANNEL_STATE_CLOSED

    settlement_block = (
        channel12.close_transaction.finished_block_number +
        channel12.settle_timeout +
        10  # arbitrary number of additional blocks, used to wait for the settle() call
    )
    wait_until_block(node1.raiden.chain, settlement_block)

    # Load the new state with the channel settled
    channel12 = get_channelstate(node1, node2, token_network_identifier)
    assert channel.get_status(channel12) == CHANNEL_STATE_SETTLED
def run(
        privatekey,
        registry_contract_address,
        secret_registry_contract_address,
        discovery_contract_address,
        listen_address,
        structlog,
        logfile,
        scenario,
        stage_prefix,
):  # pylint: disable=unused-argument
    """Boot a Raiden app against a local node and drive a JSON scenario.

    The scenario script lists tokens with per-node channel chains; this node
    opens/funds a channel with its right-hand neighbour, then (for the chain
    initiator) performs the scripted transfers. Progress is signalled by
    touching `<stage_prefix>.stageN` marker files and synchronised across
    processes via SIGUSR2; termination is via SIGQUIT/SIGTERM/SIGINT.

    Without a scenario the app only starts and idles until a stop signal.
    """
    # TODO: only enabled structlog on "initiators"
    # NOTE(review): the parameter named `structlog` shadows the structlog
    # module and is passed to its own `.configure` — confirm this is intended.
    structlog.configure(structlog, log_file=logfile)

    (listen_host, listen_port) = split_endpoint(listen_address)

    config = App.DEFAULT_CONFIG.copy()
    config['host'] = listen_host
    config['port'] = listen_port
    config['privatekey_hex'] = privatekey

    privatekey_bin = decode_hex(privatekey)

    # Assumes a local JSON-RPC endpoint on the default port.
    rpc_client = JSONRPCClient(
        '127.0.0.1',
        8545,
        privatekey_bin,
    )

    blockchain_service = BlockChainService(
        privatekey_bin,
        rpc_client,
        GAS_PRICE,
    )

    discovery = ContractDiscovery(
        blockchain_service,
        decode_hex(discovery_contract_address),
    )

    registry = blockchain_service.registry(
        registry_contract_address,
    )

    secret_registry = blockchain_service.secret_registry(
        secret_registry_contract_address,
    )

    throttle_policy = TokenBucket(
        config['protocol']['throttle_capacity'],
        config['protocol']['throttle_fill_rate'],
    )

    transport = UDPTransport(
        discovery,
        server._udp_socket((listen_host, listen_port)),
        throttle_policy,
        config['protocol'],
        dict(),
    )

    app = App(
        config,
        blockchain_service,
        registry,
        secret_registry,
        transport,
        discovery,
    )

    app.discovery.register(
        app.raiden.address,
        listen_host,
        listen_port,
    )

    app.raiden.install_payment_network_filters(app.raiden.default_registry.address)

    if scenario:
        script = json.load(scenario)

        tools = ConsoleTools(
            app.raiden,
            app.discovery,
            app.config['settle_timeout'],
            app.config['reveal_timeout'],
        )

        # target address -> total amount to transfer (filled only by initiators)
        transfers_by_peer = {}

        tokens = script['tokens']
        token_address = None
        peer = None
        our_node = hexlify(app.raiden.address)
        log.warning('our address is {}'.format(our_node))
        for token in tokens:
            # skip tokens that we're not part of
            nodes = token['channels']
            if our_node not in nodes:
                continue

            partner_nodes = [
                node
                for node in nodes
                if node != our_node
            ]

            # allow for prefunded tokens
            if 'token_address' in token:
                token_address = token['token_address']
            else:
                token_address = tools.create_token(registry_contract_address)

            transfers_with_amount = token['transfers_with_amount']

            # FIXME: in order to do bidirectional channels, only one side
            # (i.e. only token['channels'][0]) should
            # open; others should join by calling
            # raiden.api.deposit, AFTER the channel came alive!

            # NOTE: leaving unidirectional for now because it most
            #       probably will get to higher throughput

            log.warning('Waiting for all nodes to come online')

            api = RaidenAPI(app.raiden)

            for node in partner_nodes:
                api.start_health_check_for(node)

            # Block until every scripted partner is reachable.
            while True:
                all_reachable = all(
                    api.get_node_network_state(node) == NODE_NETWORK_REACHABLE
                    for node in partner_nodes
                )

                if all_reachable:
                    break

                gevent.sleep(5)

            log.warning('All nodes are online')

            if our_node != nodes[-1]:
                # We are not the last node in the chain: open and fund a
                # channel towards the next node to our right.
                our_index = nodes.index(our_node)
                peer = nodes[our_index + 1]

                tools.token_network_register(app.raiden.default_registry.address, token_address)
                amount = transfers_with_amount[nodes[-1]]

                # NOTE(review): `.decode('hex')` on a (bytes) string is a
                # Python 2 idiom — verify the intended interpreter version.
                while True:
                    try:
                        app.discovery.get(peer.decode('hex'))
                        break
                    except KeyError:
                        log.warning('Error: peer {} not found in discovery'.format(peer))
                        time.sleep(random.randrange(30))

                # Retry-forever loops: each step sleeps a random back-off and
                # tries again until the call succeeds.
                while True:
                    try:
                        log.warning('Opening channel with {} for {}'.format(peer, token_address))
                        api.channel_open(app.raiden.default_registry.address, token_address, peer)
                        break
                    except KeyError:
                        log.warning('Error: could not open channel with {}'.format(peer))
                        time.sleep(random.randrange(30))

                while True:
                    try:
                        log.warning('Funding channel with {} for {}'.format(peer, token_address))
                        api.channel_deposit(
                            app.raiden.default_registry.address,
                            token_address,
                            peer,
                            amount,
                        )
                        break
                    except Exception:
                        log.warning('Error: could not deposit {} for {}'.format(amount, peer))
                        time.sleep(random.randrange(30))

                # Only the chain initiator (index 0) performs transfers, and
                # it targets the last node of the chain.
                if our_index == 0:
                    last_node = nodes[-1]
                    transfers_by_peer[last_node] = int(amount)
            else:
                peer = nodes[-2]

        if stage_prefix is not None:
            open('{}.stage1'.format(stage_prefix), 'a').close()
            log.warning('Done with initialization, waiting to continue...')
            event = gevent.event.Event()
            gevent.signal(signal.SIGUSR2, event.set)
            event.wait()

        transfer_results = {'total_time': 0, 'timestamps': []}

        def transfer(token_address, amount_per_transfer, total_transfers, peer, is_async):
            # Run `total_transfers` transfers to `peer`, recording per-transfer
            # timestamps and the total elapsed time in `transfer_results`.
            def transfer_():
                log.warning('Making {} transfers to {}'.format(total_transfers, peer))
                initial_time = time.time()
                times = [0] * total_transfers
                for index in range(total_transfers):
                    RaidenAPI(app.raiden).transfer(
                        app.raiden.default_registry.address,
                        token_address.decode('hex'),
                        amount_per_transfer,
                        peer,
                    )
                    times[index] = time.time()

                transfer_results['total_time'] = time.time() - initial_time
                transfer_results['timestamps'] = times

                log.warning('Making {} transfers took {}'.format(
                    total_transfers, transfer_results['total_time']))
                log.warning('Times: {}'.format(times))

            if is_async:
                return gevent.spawn(transfer_)
            else:
                transfer_()

        # If sending to multiple targets, do it asynchronously, otherwise
        # keep it simple and just send to the single target on my thread.
        if len(transfers_by_peer) > 1:
            greenlets = []
            for peer_, amount in transfers_by_peer.items():
                greenlet = transfer(token_address, 1, amount, peer_, True)
                if greenlet is not None:
                    greenlets.append(greenlet)

            gevent.joinall(greenlets)

        elif len(transfers_by_peer) == 1:
            for peer_, amount in transfers_by_peer.items():
                transfer(token_address, 1, amount, peer_, False)

        log.warning('Waiting for termination')

        open('{}.stage2'.format(stage_prefix), 'a').close()
        log.warning('Waiting for transfers to finish, will write results...')
        event = gevent.event.Event()
        gevent.signal(signal.SIGUSR2, event.set)
        event.wait()

        open('{}.stage3'.format(stage_prefix), 'a').close()
        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    else:
        log.warning('No scenario file supplied, doing nothing!')

        open('{}.stage2'.format(stage_prefix), 'a').close()
        event = gevent.event.Event()
        gevent.signal(signal.SIGQUIT, event.set)
        gevent.signal(signal.SIGTERM, event.set)
        gevent.signal(signal.SIGINT, event.set)
        event.wait()

    app.stop()
def test_batch_unlock(raiden_network, token_addresses, secret_registry_address, deposit):
    """Batch unlock can be called after the channel is settled."""
    alice_app, bob_app = raiden_network
    registry_address = alice_app.raiden.default_registry.address
    token_address = token_addresses[0]
    token_proxy = alice_app.raiden.chain.token(token_address)
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        views.state_from_app(alice_app),
        alice_app.raiden.default_registry.address,
        token_address,
    )

    # Take a snapshot early on
    alice_app.raiden.wal.snapshot()

    token_network = views.get_token_network_by_identifier(
        views.state_from_app(alice_app),
        token_network_identifier,
    )

    channel_identifier = get_channelstate(alice_app, bob_app, token_network_identifier).identifier

    assert channel_identifier in token_network.partneraddresses_to_channels[
        bob_app.raiden.address]

    alice_initial_balance = token_proxy.balance_of(alice_app.raiden.address)
    bob_initial_balance = token_proxy.balance_of(bob_app.raiden.address)

    # Take snapshot before transfer
    alice_app.raiden.wal.snapshot()

    alice_to_bob_amount = 10
    identifier = 1
    # The transfer is deliberately left pending (secret not revealed off-chain)
    # so a lock remains in Alice's merkle tree.
    secret = pending_mediated_transfer(
        raiden_network,
        token_network_identifier,
        alice_to_bob_amount,
        identifier,
    )
    secrethash = sha3(secret)

    alice_bob_channel_state = get_channelstate(alice_app, bob_app, token_network_identifier)
    lock = channel.get_lock(alice_bob_channel_state.our_state, secrethash)

    # This is the current state of the protocol:
    #
    #    A -> B LockedTransfer
    #    B -> A SecretRequest
    #    - protocol didn't continue
    assert_synced_channel_state(
        token_network_identifier,
        alice_app, deposit, [lock],
        bob_app, deposit, [],
    )

    # Take a snapshot early on
    alice_app.raiden.wal.snapshot()

    our_balance_proof = alice_bob_channel_state.our_state.balance_proof

    # Test WAL restore to return the latest channel state
    restored_channel_state = channel_state_until_state_change(
        raiden=alice_app.raiden,
        payment_network_identifier=alice_app.raiden.default_registry.address,
        token_address=token_address,
        channel_identifier=alice_bob_channel_state.identifier,
        state_change_identifier='latest',
    )

    our_restored_balance_proof = restored_channel_state.our_state.balance_proof
    assert our_balance_proof == our_restored_balance_proof

    # A ChannelClose event will be generated, this will be polled by both apps
    # and each must start a task for calling settle
    RaidenAPI(bob_app.raiden).channel_close(
        registry_address,
        token_address,
        alice_app.raiden.address,
    )

    # Register the secret on-chain so the pending lock becomes claimable
    # during the on-chain unlock.
    secret_registry_proxy = alice_app.raiden.chain.secret_registry(
        secret_registry_address,
    )
    secret_registry_proxy.register_secret(secret)

    assert lock, 'the lock must still be part of the node state'
    msg = 'the secret must be registered before the lock expires'
    assert lock.expiration > alice_app.raiden.get_block_number(), msg
    assert lock.secrethash == sha3(secret)

    waiting.wait_for_settle(
        alice_app.raiden,
        registry_address,
        token_address,
        [alice_bob_channel_state.identifier],
        alice_app.raiden.alarm.sleep_time,
    )

    token_network = views.get_token_network_by_identifier(
        views.state_from_app(bob_app),
        token_network_identifier,
    )

    assert channel_identifier in token_network.partneraddresses_to_channels[
        alice_app.raiden.address]

    # wait for the node to call batch unlock
    with gevent.Timeout(10):
        wait_for_batch_unlock(
            bob_app,
            token_network_identifier,
            alice_bob_channel_state.partner_state.address,
            alice_bob_channel_state.our_state.address,
        )

    token_network = views.get_token_network_by_identifier(
        views.state_from_app(bob_app),
        token_network_identifier,
    )

    # After batch unlock the channel must be removed from the node state.
    assert channel_identifier not in token_network.partneraddresses_to_channels[
        alice_app.raiden.address]

    # The registered secret means the locked amount moved from Alice to Bob.
    alice_new_balance = alice_initial_balance + deposit - alice_to_bob_amount
    bob_new_balance = bob_initial_balance + deposit + alice_to_bob_amount

    assert token_proxy.balance_of(alice_app.raiden.address) == alice_new_balance
    assert token_proxy.balance_of(bob_app.raiden.address) == bob_new_balance
def test_token_registered_race(raiden_chain, token_amount, retry_timeout, contract_manager): """If a token is registered it must appear on the token list. If two nodes register the same token one of the transactions will fail. The node that receives an error for "already registered token" must see the token in the token list. Issue: #784 """ app0, app1 = raiden_chain api0 = RaidenAPI(app0.raiden) api1 = RaidenAPI(app1.raiden) # Recreate the race condition by making sure the non-registering app won't # register at all by watching for the TokenAdded blockchain event. event_listeners = app1.raiden.blockchain_events.event_listeners app1.raiden.blockchain_events.event_listeners = list() token_address = deploy_contract_web3( contract_name=CONTRACT_HUMAN_STANDARD_TOKEN, deploy_client=app1.raiden.chain.client, contract_manager=contract_manager, constructor_arguments=( token_amount, 2, 'raiden', 'Rd', ), ) gevent.sleep(1) registry_address = app0.raiden.default_registry.address assert token_address not in api0.get_tokens_list(registry_address) assert token_address not in api1.get_tokens_list(registry_address) api0.token_network_register(registry_address, token_address) exception = RuntimeError('Did not see the token registration within 30 seconds') with gevent.Timeout(seconds=30, exception=exception): wait_for_state_change( app0.raiden, ContractReceiveNewTokenNetwork, { 'token_network': { 'token_address': token_address, }, }, retry_timeout, ) assert token_address in api0.get_tokens_list(registry_address) assert token_address not in api1.get_tokens_list(registry_address) # The next time when the event is polled, the token is registered app1.raiden.blockchain_events.event_listeners = event_listeners waiting.wait_for_block( app1.raiden, app1.raiden.get_block_number() + 1, retry_timeout, ) assert token_address in api1.get_tokens_list(registry_address)
class ConnectionManager:
    """The ConnectionManager provides a high level abstraction for connecting
    to a Token network.

    Note: It is initialized with 0 funds; a connection to the token network
    will be only established _after_ calling `connect(funds)`
    """
    # XXX Hack: for bootstrapping, the first node on a network opens a channel
    # with this address to become visible.
    BOOTSTRAP_ADDR_HEX = b'2' * 40
    BOOTSTRAP_ADDR = unhexlify(BOOTSTRAP_ADDR_HEX)

    def __init__(self, raiden, token_network_identifier):
        chain_state = views.state_from_raiden(raiden)
        token_network_state = views.get_token_network_by_identifier(
            chain_state,
            token_network_identifier,
        )
        token_network_registry = views.get_token_network_registry_by_token_network_identifier(
            chain_state,
            token_network_identifier,
        )

        # TODO:
        # - Add timeout for transaction polling, used to overwrite the RaidenAPI
        # defaults
        # - Add a proper selection strategy (#576)
        self.funds = 0
        self.initial_channel_target = 0
        self.joinable_funds_target = 0

        self.raiden = raiden
        self.registry_address = token_network_registry.address
        self.token_network_identifier = token_network_identifier
        self.token_address = token_network_state.token_address

        # protects self.funds and self.initial_channel_target
        self.lock = Semaphore()
        self.api = RaidenAPI(raiden)

    def connect(
            self,
            funds: int,
            initial_channel_target: int = 3,
            joinable_funds_target: float = 0.4,
    ):
        """Connect to the network.

        Subsequent calls to `connect` are allowed, but will only affect the
        spendable funds and the connection strategy parameters for the future.
        `connect` will not close any channels.

        Note: the ConnectionManager does not discriminate manually opened
        channels from automatically opened ones. If the user manually opened
        channels, those deposit amounts will affect the funding per channel
        and the number of new channels opened.

        Args:
            funds: Target amount of tokens spendable to join the network.
            initial_channel_target: Target number of channels to open.
            joinable_funds_target: Amount of funds not initially assigned.

        Raises:
            ValueError: If `funds` is not positive.
        """
        if funds <= 0:
            raise ValueError('connecting needs a positive value for `funds`')

        with self.lock:
            self.funds = funds
            self.initial_channel_target = initial_channel_target
            self.joinable_funds_target = joinable_funds_target

            log_open_channels(self.raiden, self.registry_address, self.token_address, funds)

            qty_network_channels = views.count_token_network_channels(
                views.state_from_raiden(self.raiden),
                self.registry_address,
                self.token_address,
            )

            if not qty_network_channels:
                log.debug('bootstrapping token network.')
                # make ourselves visible
                self.api.channel_open(
                    self.registry_address,
                    self.token_address,
                    self.BOOTSTRAP_ADDR,
                )
            else:
                self._open_channels()

    def leave_async(self, only_receiving=True):
        """ Async version of `leave()` """
        leave_result = AsyncResult()
        # Bug fix: `leave` requires (registry_address, only_receiving). The
        # previous code spawned `self.leave` with only `only_receiving`, which
        # shifted the flag into the `registry_address` slot. Pass this
        # manager's registry address explicitly.
        gevent.spawn(self.leave, self.registry_address, only_receiving).link(leave_result)
        return leave_result

    def leave(self, registry_address, only_receiving=True):
        """ Leave the token network.

        This implies closing all channels and waiting for all channels to be
        settled.

        Note: By default we're just discarding all channels for which we
        haven't received anything. This potentially leaves deposits locked in
        channels after `closing`. This is "safe" from an accounting point of
        view (deposits can not be lost), but may still be undesirable from a
        liquidity point of view (deposits will only be freed after manually
        closing or after the partner closed the channel).

        If only_receiving is False then we close and settle all channels
        irrespective of them having received transfers or not.
        """
        with self.lock:
            # Setting the target to 0 marks the manager as leaving (see
            # `_leaving_state`), which stops new channels from being opened.
            self.initial_channel_target = 0

            if only_receiving:
                channels_to_close = views.get_channelstate_for_receiving(
                    views.state_from_raiden(self.raiden),
                    registry_address,
                    self.token_address,
                )
            else:
                channels_to_close = views.get_channelstate_open(
                    chain_state=views.state_from_raiden(self.raiden),
                    payment_network_id=registry_address,
                    token_address=self.token_address,
                )

            partner_addresses = [
                channel_state.partner_state.address
                for channel_state in channels_to_close
            ]
            self.api.channel_batch_close(
                registry_address,
                self.token_address,
                partner_addresses,
            )

            channel_ids = [
                channel_state.identifier
                for channel_state in channels_to_close
            ]

            waiting.wait_for_settle(
                self.raiden,
                registry_address,
                self.token_address,
                channel_ids,
                self.raiden.alarm.sleep_time,
            )

        return channels_to_close

    def join_channel(self, partner_address, partner_deposit):
        """Will be called, when we were selected as channel partner by another
        node. It will fund the channel with up to the partners deposit, but
        not more than remaining funds or the initial funding per channel.

        If the connection manager has no funds, this is a noop.
        """
        with self.lock:
            joining_funds = min(
                partner_deposit,
                self._funds_remaining,
                self._initial_funding_per_partner,
            )
            if joining_funds <= 0 or self._leaving_state:
                return

            self.api.set_total_channel_deposit(
                self.registry_address,
                self.token_address,
                partner_address,
                joining_funds,
            )

            log.debug(
                'joined a channel!',
                funds=joining_funds,
                me=pex(self.raiden.address),
                partner=pex(partner_address),
            )

    def retry_connect(self):
        """Will be called when new channels in the token network are detected.
        If the minimum number of channels was not yet established, it will try
        to open new channels.

        If the connection manager has no funds, this is a noop.
        """
        with self.lock:
            if self._funds_remaining <= 0 or self._leaving_state:
                return

            open_channels = views.get_channelstate_open(
                chain_state=views.state_from_raiden(self.raiden),
                payment_network_id=self.registry_address,
                token_address=self.token_address,
            )
            if len(open_channels) >= self.initial_channel_target:
                return

            self._open_channels()

    def find_new_partners(self, number: int):
        """Search the token network for potential channel partners.

        Args:
            number: number of partners to return
        """
        open_channels = views.get_channelstate_open(
            chain_state=views.state_from_raiden(self.raiden),
            payment_network_id=self.registry_address,
            token_address=self.token_address,
        )
        # Exclude existing partners, the bootstrap address, and ourselves.
        known = set(channel_state.partner_state.address for channel_state in open_channels)
        known.add(self.BOOTSTRAP_ADDR)
        known.add(self.raiden.address)

        participants_addresses = views.get_participants_addresses(
            views.state_from_raiden(self.raiden),
            self.registry_address,
            self.token_address,
        )

        available = participants_addresses - known
        new_partners = list(available)[:number]

        log.debug('found {} partners'.format(len(available)))

        return new_partners

    def _open_channels(self):
        """ Open channels until there are `self.initial_channel_target`
        channels open. Do nothing if there are enough channels open already.

        Note:
            - This method must be called with the lock held.
        """
        open_channels = views.get_channelstate_open(
            chain_state=views.state_from_raiden(self.raiden),
            payment_network_id=self.registry_address,
            token_address=self.token_address,
        )

        qty_channels_to_open = self.initial_channel_target - len(open_channels)
        if qty_channels_to_open <= 0:
            return

        for partner in self.find_new_partners(qty_channels_to_open):
            try:
                self.api.channel_open(
                    self.registry_address,
                    self.token_address,
                    partner,
                )
            except DuplicatedChannelError:
                # This can fail because of a race condition, where the channel
                # partner opens first.
                log.info('partner opened channel first')

            try:
                self.api.set_total_channel_deposit(
                    self.registry_address,
                    self.token_address,
                    partner,
                    self._initial_funding_per_partner,
                )
            except AddressWithoutCode:
                # `log.warning` instead of the deprecated `log.warn` alias.
                log.warning('connection manager: channel closed just after it was created')
            except TransactionThrew:
                log.exception('connection manager: deposit failed')

    @property
    def _initial_funding_per_partner(self) -> int:
        """The calculated funding per partner depending on configuration and
        overall funding of the ConnectionManager.

        Note:
            - This attribute must be accessed with the lock held.
        """
        if self.initial_channel_target:
            return int(
                self.funds * (1 - self.joinable_funds_target) /
                self.initial_channel_target,
            )

        return 0

    @property
    def _funds_remaining(self) -> int:
        """The remaining funds after subtracting the already deposited amounts.

        Note:
            - This attribute must be accessed with the lock held.
        """
        if self.funds > 0:
            sum_deposits = views.get_our_capacity_for_token_network(
                views.state_from_raiden(self.raiden),
                self.registry_address,
                self.token_address,
            )

            remaining = self.funds - sum_deposits
            return remaining

        return 0

    @property
    def _leaving_state(self) -> bool:
        """True if the node is leaving the token network.

        Note:
            - This attribute must be accessed with the lock held.
        """
        return self.initial_channel_target < 1

    def __repr__(self) -> str:
        open_channels = views.get_channelstate_open(
            chain_state=views.state_from_raiden(self.raiden),
            payment_network_id=self.registry_address,
            token_address=self.token_address,
        )
        return f'{self.__class__.__name__}(target={self.initial_channel_target} ' +\
            f'channels={len(open_channels)}:{open_channels!r})'
def test_received_lockedtransfer_closedchannel( raiden_network, reveal_timeout, token_addresses, deposit, ): app0, app1 = raiden_network registry_address = app0.raiden.default_registry.address token_address = token_addresses[0] token_network_identifier = views.get_token_network_identifier_by_token_address( views.state_from_app(app0), app0.raiden.default_registry.address, token_address, ) channel0 = get_channelstate(app0, app1, token_network_identifier) RaidenAPI(app1.raiden).channel_close( registry_address, token_address, app0.raiden.address, ) wait_until_block( app0.raiden.chain, app0.raiden.chain.block_number() + 1, ) # Now receive one mediated transfer for the closed channel lock_amount = 10 payment_identifier = 1 expiration = reveal_timeout * 2 mediated_transfer_message = LockedTransfer( chain_id=UNIT_CHAIN_ID, message_identifier=random.randint(0, UINT64_MAX), payment_identifier=payment_identifier, nonce=1, token_network_address=token_network_identifier, token=token_address, channel_identifier=channel0.identifier, transferred_amount=0, locked_amount=lock_amount, recipient=app1.raiden.address, locksroot=UNIT_SECRETHASH, lock=Lock(lock_amount, expiration, UNIT_SECRETHASH), target=app1.raiden.address, initiator=app0.raiden.address, fee=0, ) sign_and_inject( mediated_transfer_message, app0.raiden.private_key, app0.raiden.address, app1, ) # The local state must not change since the channel is already closed assert_synced_channel_state( token_network_identifier, app0, deposit, [], app1, deposit, [], )
def test_clear_closed_queue(raiden_network, token_addresses, deposit):
    """ Closing a channel clears the respective message queue. """
    app0, app1 = raiden_network

    registry_address = app0.raiden.default_registry.address
    token_address = token_addresses[0]
    chain_state0 = views.state_from_app(app0)
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        chain_state0,
        app0.raiden.default_registry.address,
        token_address,
    )
    token_network = views.get_token_network_by_identifier(
        chain_state0,
        token_network_identifier,
    )

    channel_identifier = get_channelstate(app0, app1, token_network_identifier).identifier

    assert channel_identifier in token_network.partneraddresses_to_channelidentifiers[
        app1.raiden.address
    ]

    # Stop app1's transport so app0's message stays queued (undelivered).
    app1.raiden.transport.stop()
    app1.raiden.transport.get()

    # make a direct transfer to ensure the nodes have communicated
    amount = 10
    payment_identifier = 1337
    app0.raiden.direct_transfer_async(
        token_network_identifier,
        amount,
        app1.raiden.address,
        identifier=payment_identifier,
    )

    # assert the specific queue is present
    chain_state0 = views.state_from_app(app0)
    queues0 = views.get_all_messagequeues(chain_state=chain_state0)
    assert [
        (queue_id, queue)
        for queue_id, queue in queues0.items()
        if queue_id.recipient == app1.raiden.address and
        queue_id.channel_identifier == channel_identifier and queue
    ]

    # A ChannelClose event will be generated, this will be polled by both apps
    RaidenAPI(app0.raiden).channel_close(
        registry_address,
        token_address,
        app1.raiden.address,
    )

    exception = ValueError('Could not get close event')
    with gevent.Timeout(seconds=30, exception=exception):
        waiting.wait_for_close(
            app0.raiden,
            registry_address,
            token_address,
            [channel_identifier],
            app0.raiden.alarm.sleep_time,
        )

    # assert all queues with this partner are gone or empty
    chain_state0 = views.state_from_app(app0)
    queues0 = views.get_all_messagequeues(chain_state=chain_state0)
    assert not [
        (queue_id, queue)
        for queue_id, queue in queues0.items()
        if queue_id.recipient == app1.raiden.address and queue
    ]

    chain_state1 = views.state_from_app(app1)
    queues1 = views.get_all_messagequeues(chain_state=chain_state1)
    assert not [
        (queue_id, queue)
        for queue_id, queue in queues1.items()
        if queue_id.recipient == app0.raiden.address and queue
    ]
def _run_smoketest(): print_step('Starting Raiden') # invoke the raiden app app_ = ctx.invoke(app, **args) raiden_api = RaidenAPI(app_.raiden) rest_api = RestAPI(raiden_api) api_server = APIServer(rest_api) (api_host, api_port) = split_endpoint(args['api_address']) api_server.start(api_host, api_port) raiden_api.channel_open( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], to_canonical_address(token.contract.address), to_canonical_address(TEST_PARTNER_ADDRESS), None, None, ) raiden_api.set_total_channel_deposit( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], to_canonical_address(token.contract.address), to_canonical_address(TEST_PARTNER_ADDRESS), TEST_DEPOSIT_AMOUNT, ) smoketest_config['contracts']['registry_address'] = to_checksum_address( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], ) smoketest_config['contracts']['secret_registry_address'] = to_checksum_address( contract_addresses[CONTRACT_SECRET_REGISTRY], ) smoketest_config['contracts']['discovery_address'] = to_checksum_address( contract_addresses[CONTRACT_ENDPOINT_REGISTRY], ) smoketest_config['contracts']['token_address'] = to_checksum_address( token.contract.address, ) success = False try: print_step('Running smoketest') error = run_smoketests(app_.raiden, smoketest_config, debug=debug) if error is not None: append_report('Smoketest assertion error', error) else: success = True finally: app_.stop() ethereum.send_signal(2) err, out = ethereum.communicate() append_report('Ethereum init stdout', ethereum_config['init_log_out'].decode('utf-8')) append_report('Ethereum init stderr', ethereum_config['init_log_err'].decode('utf-8')) append_report('Ethereum stdout', out) append_report('Ethereum stderr', err) append_report('Smoketest configuration', json.dumps(smoketest_config)) if success: print_step(f'Smoketest successful, report was written to {report_file}') else: print_step(f'Smoketest had errors, report was written to {report_file}', error=True) return success
def test_recovery_unhappy_case( raiden_network, number_of_nodes, deposit, token_addresses, network_wait, skip_if_not_udp, events_poll_timeout, ): app0, app1, app2 = raiden_network token_address = token_addresses[0] node_state = views.state_from_app(app0) payment_network_id = app0.raiden.default_registry.address token_network_identifier = views.get_token_network_identifier_by_token_address( node_state, payment_network_id, token_address, ) # make a few transfers from app0 to app2 amount = 1 spent_amount = deposit - 2 for _ in range(spent_amount): mediated_transfer( app0, app2, token_network_identifier, amount, timeout=network_wait * number_of_nodes, ) app0.raiden.stop() host_port = (app0.raiden.config['host'], app0.raiden.config['port']) socket = server._udp_socket(host_port) new_transport = UDPTransport( app0.discovery, socket, app0.raiden.transport.throttle_policy, app0.raiden.config['transport'], ) app0.stop() RaidenAPI(app1.raiden).channel_close( app1.raiden.default_registry.address, token_address, app0.raiden.address, ) channel01 = views.get_channelstate_for( views.state_from_app(app1), app1.raiden.default_registry.address, token_address, app0.raiden.address, ) waiting.wait_for_settle( app1.raiden, app1.raiden.default_registry.address, token_address, [channel01.identifier], events_poll_timeout, ) app0_restart = App( app0.config, app0.raiden.chain, app0.raiden.default_registry, app0.raiden.default_secret_registry, new_transport, app0.raiden.discovery, ) del app0 # from here on the app0_restart should be used assert_synched_channel_state( token_network_identifier, app0_restart, deposit - spent_amount, [], app1, deposit + spent_amount, [], ) assert_synched_channel_state( token_network_identifier, app1, deposit - spent_amount, [], app2, deposit + spent_amount, [], )
def test_settled_lock(token_addresses, raiden_network, deposit):
    """ Any transfer following a secret revealed must update the locksroot, so
    that an attacker cannot reuse a secret to double claim a lock."""
    app0, app1 = raiden_network
    token_address = token_addresses[0]
    amount = 30

    address0 = app0.raiden.address
    address1 = app1.raiden.address

    deposit0 = deposit
    deposit1 = deposit

    token_proxy = app0.raiden.chain.token(token_address)
    initial_balance0 = token_proxy.balance_of(address0)
    initial_balance1 = token_proxy.balance_of(address1)

    # Using a pending mediated transfer because this allows us to compute the
    # merkle proof
    identifier = 1
    secret = pending_mediated_transfer(
        raiden_network,
        token_address,
        amount,
        identifier,
    )
    hashlock = sha3(secret)

    # Compute the merkle proof for the pending transfer, and then unlock
    channelstate_0_1 = get_channelstate(app0, app1, token_address)
    lock = channel.get_lock(channelstate_0_1.our_state, hashlock)
    unlock_proof = channel.compute_proof_for_lock(
        channelstate_0_1.our_state,
        secret,
        lock,
    )
    claim_lock(raiden_network, identifier, token_address, secret)

    # Make a new transfer
    direct_transfer(app0, app1, token_address, amount, identifier=1)

    RaidenAPI(app1.raiden).channel_close(token_address, app0.raiden.address)

    # The direct transfer locksroot must not contain the unlocked lock, the
    # withdraw must fail.
    netting_channel = app1.raiden.chain.netting_channel(channelstate_0_1.identifier)
    # NOTE(review): any Exception type is accepted here; the on-chain proxy's
    # exact failure type is not pinned down.
    with pytest.raises(Exception):
        netting_channel.withdraw(UnlockProofState(unlock_proof, lock.encoded, secret))

    waiting.wait_for_settle(
        app1.raiden,
        app1.raiden.default_registry.address,
        token_address,
        [channelstate_0_1.identifier],
        app1.raiden.alarm.wait_time,
    )

    # Both the mediated and the direct transfer moved `amount`, hence the
    # factor of two in the final balances.
    expected_balance0 = initial_balance0 + deposit0 - amount * 2
    expected_balance1 = initial_balance1 + deposit1 + amount * 2
    assert token_proxy.balance_of(address0) == expected_balance0
    assert token_proxy.balance_of(address1) == expected_balance1
def test_participant_selection(raiden_network, token_addresses):
    """Connect every node to a token network, verify the resulting channel
    graph saturates each node's channel target and is unpartitioned, perform
    a transfer, and finally have one node leave and wait for its channels to
    settle.
    """
    registry_address = raiden_network[0].raiden.default_registry.address

    # pylint: disable=too-many-locals
    token_address = token_addresses[0]

    # connect the first node (will register the token if necessary)
    RaidenAPI(raiden_network[0].raiden).token_network_connect(
        registry_address,
        token_address,
        100,
    )

    # connect the other nodes
    connect_greenlets = [
        gevent.spawn(
            RaidenAPI(app.raiden).token_network_connect,
            registry_address,
            token_address,
            100,
        )
        for app in raiden_network[1:]
    ]
    gevent.wait(connect_greenlets)

    # wait some blocks to let the network connect
    wait_blocks = 15
    for _ in range(wait_blocks):
        for app in raiden_network:
            wait_until_block(
                app.raiden.chain,
                app.raiden.chain.block_number() + 1,
            )

    connection_managers = [
        app.raiden.connection_manager_for_token(
            registry_address,
            token_address,
        )
        for app in raiden_network
    ]

    def open_channels_count(connection_managers_):
        # One entry per manager; an empty `open_channels` makes the entry falsy.
        return [
            connection_manager.open_channels
            for connection_manager in connection_managers_
        ]

    assert all(open_channels_count(connection_managers))

    def not_saturated(connection_managers_):
        # Non-empty while at least one manager is below its channel target.
        return [
            1
            for connection_manager_ in connection_managers_
            if len(connection_manager_.open_channels) < connection_manager_.initial_channel_target
        ]

    chain = raiden_network[-1].raiden.chain
    max_wait = 12

    while not_saturated(connection_managers) and max_wait > 0:
        wait_until_block(chain, chain.block_number() + 1)
        max_wait -= 1

    assert not not_saturated(connection_managers)

    # Ensure unpartitioned network
    addresses = [app.raiden.address for app in raiden_network]
    for connection_manager in connection_managers:
        assert all(
            connection_manager.channelgraph.has_path(
                connection_manager.raiden.address,
                address,
            )
            for address in addresses
        )

    # average channel count
    acc = (
        sum(
            len(connection_manager.open_channels)
            for connection_manager in connection_managers
        ) /
        len(connection_managers)
    )

    try:
        # FIXME: depending on the number of channels, this will fail, due to weak
        # selection algorithm
        # https://github.com/raiden-network/raiden/issues/576
        assert not any(
            len(connection_manager.open_channels) > 2 * acc
            for connection_manager in connection_managers
        )
    except AssertionError:
        pass

    # create a transfer to the leaving node, so we have a channel to settle
    sender = raiden_network[-1].raiden
    receiver = raiden_network[0].raiden

    # BUG FIX: `sender` is already the RaidenService (`raiden_network[-1].raiden`,
    # see `sender.address` below), so the registry is `sender.default_registry`;
    # the previous `sender.raiden.default_registry` dereferenced a nonexistent
    # attribute.
    registry_address = sender.default_registry.address

    # assert there is a direct channel receiver -> sender (vv)
    receiver_channel = RaidenAPI(receiver).get_channel_list(
        registry_address=registry_address,
        token_address=token_address,
        partner_address=sender.address,
    )
    assert len(receiver_channel) == 1
    receiver_channel = receiver_channel[0]
    assert receiver_channel.external_state.opened_block != 0
    assert not receiver_channel.received_transfers

    # assert there is a direct channel sender -> receiver
    sender_channel = RaidenAPI(sender).get_channel_list(
        registry_address=registry_address,
        token_address=token_address,
        partner_address=receiver.address,
    )
    assert len(sender_channel) == 1
    sender_channel = sender_channel[0]
    assert sender_channel.can_transfer
    assert sender_channel.external_state.opened_block != 0

    RaidenAPI(sender).transfer_and_wait(
        registry_address,
        token_address,
        1,
        receiver.address,
    )

    # now receiver has a transfer
    assert len(receiver_channel.received_transfers)

    # test `leave()` method
    connection_manager = connection_managers[0]

    before = len(connection_manager.receiving_channels)

    # Allow five times the estimated settle duration before giving up.
    timeout = (
        connection_manager.min_settle_blocks *
        connection_manager.raiden.chain.estimate_blocktime() *
        5
    )
    assert timeout > 0

    with gevent.timeout.Timeout(timeout):
        try:
            RaidenAPI(raiden_network[0].raiden).token_network_leave(
                registry_address,
                token_address,
            )
        except gevent.timeout.Timeout:
            log.error('timeout while waiting for leave')

    before_block = connection_manager.raiden.chain.block_number()
    wait_blocks = connection_manager.min_settle_blocks + 10
    wait_until_block(
        connection_manager.raiden.chain,
        before_block + wait_blocks,
    )
    # BUG FIX: `block_number` is a method (called with `()` everywhere else in
    # this test); comparing the bound method object to an int raised TypeError
    # instead of asserting the chain advanced.
    assert connection_manager.raiden.chain.block_number() >= before_block + wait_blocks
    wait_until_block(
        receiver.chain,
        before_block + wait_blocks,
    )
    # Poll until the receiver's channel with the leaving node has settled.
    while receiver_channel.state != CHANNEL_STATE_SETTLED:
        gevent.sleep(receiver.alarm.wait_time)
    after = len(connection_manager.receiving_channels)

    assert before > after
    assert after == 0
def test_withdraw(raiden_network, token_addresses, deposit):
    """Withdraw can be called on a closed channel.

    Starts a mediated transfer that is left pending (secret never revealed
    off-chain), closes the channel, withdraws the lock on-chain using the
    merkle proof, waits for settlement, and checks both the final token
    balances and the WAL state changes.
    """
    alice_app, bob_app = raiden_network
    registry_address = alice_app.raiden.default_registry.address
    token_address = token_addresses[0]
    token_proxy = alice_app.raiden.chain.token(token_address)

    alice_initial_balance = token_proxy.balance_of(alice_app.raiden.address)
    bob_initial_balance = token_proxy.balance_of(bob_app.raiden.address)

    alice_to_bob_amount = 10
    identifier = 1
    secret = pending_mediated_transfer(
        raiden_network,
        token_address,
        alice_to_bob_amount,
        identifier,
    )
    hashlock = sha3(secret)

    # This is the current state of the protocol:
    #
    #    A -> B MediatedTransfer
    #    B -> A SecretRequest
    #    - protocol didn't continue
    alice_bob_channel = get_channelstate(alice_app, bob_app, token_address)
    bob_alice_channel = get_channelstate(bob_app, alice_app, token_address)

    lock = channel.get_lock(alice_bob_channel.our_state, hashlock)
    assert lock

    assert_synched_channel_state(
        token_address,
        alice_app, deposit, [lock],
        bob_app, deposit, [],
    )

    # get proof, that locked transfermessage was in merkle tree, with locked.root
    unlock_proof = channel.compute_proof_for_lock(
        alice_bob_channel.our_state,
        secret,
        lock,
    )

    assert validate_proof(
        unlock_proof.merkle_proof,
        merkleroot(bob_alice_channel.partner_state.merkletree),
        sha3(lock.encoded),
    )
    assert unlock_proof.lock_encoded == lock.encoded
    assert unlock_proof.secret == secret

    # A ChannelClose event will be generated, this will be polled by both apps
    # and each must start a task for calling settle
    RaidenAPI(bob_app.raiden).channel_close(
        token_address,
        alice_app.raiden.address,
    )

    # Unlock will not be called because the secret was not revealed
    assert lock.expiration > alice_app.raiden.chain.block_number()
    assert lock.hashlock == sha3(secret)

    # Withdraw the still-locked lock on-chain using the merkle proof.
    nettingchannel_proxy = bob_app.raiden.chain.netting_channel(
        bob_alice_channel.identifier,
    )
    nettingchannel_proxy.withdraw(unlock_proof)

    waiting.wait_for_settle(
        alice_app.raiden,
        registry_address,
        token_address,
        [alice_bob_channel.identifier],
        alice_app.raiden.alarm.wait_time,
    )

    alice_bob_channel = get_channelstate(alice_app, bob_app, token_address)
    bob_alice_channel = get_channelstate(bob_app, alice_app, token_address)

    alice_netted_balance = alice_initial_balance + deposit - alice_to_bob_amount
    bob_netted_balance = bob_initial_balance + deposit + alice_to_bob_amount

    assert token_proxy.balance_of(alice_app.raiden.address) == alice_netted_balance
    assert token_proxy.balance_of(bob_app.raiden.address) == bob_netted_balance

    # Now let's query the WAL to see if the state changes were logged as expected
    state_changes = alice_app.raiden.wal.storage.get_statechanges_by_identifier(
        from_identifier=0,
        to_identifier='latest',
    )

    alice_bob_channel = get_channelstate(alice_app, bob_app, token_address)
    bob_alice_channel = get_channelstate(bob_app, alice_app, token_address)

    assert must_contain_entry(state_changes, ContractReceiveChannelWithdraw, {
        'payment_network_identifier': registry_address,
        'token_address': token_address,
        'channel_identifier': alice_bob_channel.identifier,
        'hashlock': hashlock,
        'secret': secret,
        'receiver': bob_app.raiden.address,
    })
def test_recovery_unhappy_case(
        raiden_network,
        number_of_nodes,
        deposit,
        token_addresses,
        network_wait,
        skip_if_not_udp,  # pylint: disable=unused-argument
        retry_timeout,
):
    """Restart a node whose channel was closed and settled while it was down,
    and verify the restarted node's WAL records the on-chain settlement.
    """
    app0, app1, app2 = raiden_network
    token_address = token_addresses[0]

    chain_state = views.state_from_app(app0)
    payment_network_id = app0.raiden.default_registry.address
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        chain_state,
        payment_network_id,
        token_address)

    # make a few transfers from app0 to app2
    amount = 1
    spent_amount = deposit - 2
    for identifier in range(spent_amount):
        transfer(
            initiator_app=app0,
            target_app=app2,
            token_address=token_address,
            amount=amount,
            identifier=identifier,
            timeout=network_wait * number_of_nodes,
        )

    app0.raiden.stop()

    # Rebind the same UDP host/port so the restarted node keeps its network
    # identity.
    host_port = (
        app0.raiden.config["transport"]["udp"]["host"],
        app0.raiden.config["transport"]["udp"]["port"],
    )
    socket = server._udp_socket(host_port)

    new_transport = UDPTransport(
        app0.raiden.address,
        app0.discovery,
        socket,
        app0.raiden.transport.throttle_policy,
        app0.raiden.config["transport"]["udp"],
    )

    app0.stop()

    # While app0 is offline, app1 closes the channel on-chain and waits for
    # it to settle.
    RaidenAPI(app1.raiden).channel_close(
        app1.raiden.default_registry.address,
        token_address,
        app0.raiden.address)

    channel01 = views.get_channelstate_for(
        views.state_from_app(app1),
        app1.raiden.default_registry.address,
        token_address,
        app0.raiden.address,
    )

    waiting.wait_for_settle(
        app1.raiden,
        app1.raiden.default_registry.address,
        token_address,
        [channel01.identifier],
        retry_timeout,
    )

    raiden_event_handler = RaidenEventHandler()
    message_handler = MessageHandler()

    # Re-create the app with the same persistent state but the new transport.
    app0_restart = App(
        config=app0.config,
        chain=app0.raiden.chain,
        query_start_block=0,
        default_registry=app0.raiden.default_registry,
        default_secret_registry=app0.raiden.default_secret_registry,
        default_service_registry=app0.raiden.default_service_registry,
        transport=new_transport,
        raiden_event_handler=raiden_event_handler,
        message_handler=message_handler,
        discovery=app0.raiden.discovery,
    )
    del app0  # from here on the app0_restart should be used
    app0_restart.start()

    # The restarted node must have picked up the settlement that happened
    # while it was offline.
    state_changes = app0_restart.raiden.wal.storage.get_statechanges_by_identifier(
        from_identifier=0,
        to_identifier="latest")

    assert search_for_item(
        state_changes,
        ContractReceiveChannelSettled,
        {
            "token_network_identifier": token_network_identifier,
            "channel_identifier": channel01.identifier,
        },
    )
def test_channel_lifecycle(raiden_network, token_addresses, deposit, transport_protocol):
    """Exercise the full channel lifecycle via the RaidenAPI: open, deposit
    (including the idempotency/DepositMismatch check), healthcheck status,
    blockchain event queries, close, and settlement recorded in the WAL.
    """
    node1, node2 = raiden_network
    token_address = token_addresses[0]
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        views.state_from_app(node1),
        node1.raiden.default_registry.address,
        token_address,
    )

    api1 = RaidenAPI(node1.raiden)
    api2 = RaidenAPI(node2.raiden)

    registry_address = node1.raiden.default_registry.address

    # nodes don't have a channel, so they are not healthchecking
    assert api1.get_node_network_state(api2.address) == NODE_NETWORK_UNKNOWN
    assert api2.get_node_network_state(api1.address) == NODE_NETWORK_UNKNOWN
    assert not api1.get_channel_list(registry_address, token_address, api2.address)

    # Make sure invalid arguments to get_channel_list are caught
    with pytest.raises(UnknownTokenAddress):
        api1.get_channel_list(
            registry_address=registry_address,
            token_address=None,
            partner_address=api2.address,
        )

    # open is a synchronous api
    api1.channel_open(node1.raiden.default_registry.address, token_address, api2.address)
    channels = api1.get_channel_list(registry_address, token_address, api2.address)
    assert len(channels) == 1

    channel12 = get_channelstate(node1, node2, token_network_identifier)
    assert channel.get_status(channel12) == CHANNEL_STATE_OPENED

    # The on-chain open event must list both participants.
    event_list1 = api1.get_blockchain_events_channel(
        token_address,
        channel12.partner_state.address,
    )
    assert any(
        (
            event['event'] == ChannelEvent.OPENED and
            is_same_address(
                event['args']['participant1'],
                to_normalized_address(api1.address),
            ) and
            is_same_address(
                event['args']['participant2'],
                to_normalized_address(api2.address),
            )
        )
        for event in event_list1
    )

    token_events = api1.get_blockchain_events_token_network(
        token_address,
    )
    assert token_events[0]['event'] == ChannelEvent.OPENED

    registry_address = api1.raiden.default_registry.address
    # Load the new state with the deposit
    api1.set_total_channel_deposit(
        registry_address,
        token_address,
        api2.address,
        deposit,
    )

    # let's make sure it's idempotent. Same deposit should raise deposit mismatch limit
    with pytest.raises(DepositMismatch):
        api1.set_total_channel_deposit(
            registry_address,
            token_address,
            api2.address,
            deposit,
        )

    channel12 = get_channelstate(node1, node2, token_network_identifier)

    assert channel.get_status(channel12) == CHANNEL_STATE_OPENED
    assert channel.get_balance(channel12.our_state, channel12.partner_state) == deposit
    assert channel12.our_state.contract_balance == deposit
    assert api1.get_channel_list(registry_address, token_address, api2.address) == [channel12]

    # there is a channel open, they must be healthchecking each other
    assert api1.get_node_network_state(api2.address) == NODE_NETWORK_REACHABLE
    assert api2.get_node_network_state(api1.address) == NODE_NETWORK_REACHABLE

    event_list2 = api1.get_blockchain_events_channel(
        token_address,
        channel12.partner_state.address,
    )
    assert any(
        (
            event['event'] == ChannelEvent.DEPOSIT and
            is_same_address(
                event['args']['participant'],
                to_normalized_address(api1.address),
            ) and
            event['args']['total_deposit'] == deposit
        )
        for event in event_list2
    )

    api1.channel_close(registry_address, token_address, api2.address)

    # Load the new state with the channel closed
    channel12 = get_channelstate(node1, node2, token_network_identifier)

    event_list3 = api1.get_blockchain_events_channel(
        token_address,
        channel12.partner_state.address,
    )
    assert len(event_list3) > len(event_list2)
    assert any(
        (
            event['event'] == ChannelEvent.CLOSED and
            is_same_address(
                event['args']['closing_participant'],
                to_normalized_address(api1.address),
            )
        )
        for event in event_list3
    )
    assert channel.get_status(channel12) == CHANNEL_STATE_CLOSED

    settlement_block = (
        channel12.close_transaction.finished_block_number +
        channel12.settle_timeout +
        10  # arbitrary number of additional blocks, used to wait for the settle() call
    )
    wait_until_block(node1.raiden.chain, settlement_block + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS)

    state_changes = node1.raiden.wal.storage.get_statechanges_by_identifier(
        from_identifier=0,
        to_identifier='latest',
    )

    assert must_contain_entry(state_changes, ContractReceiveChannelSettled, {
        'token_network_identifier': token_network_identifier,
        'channel_identifier': channel12.identifier,
    })
def test_settle_is_automatically_called(raiden_network, token_addresses, deposit):
    """Closing a channel from one side must make the nodes settle it on their
    own, and both the close and the settle must end up in the WAL.
    """
    app0, app1 = raiden_network
    token_address = token_addresses[0]
    registry_address = app0.raiden.default_registry.address
    channel_id = get_channelstate(app0, app1, token_address).identifier

    # Closing emits a ChannelClose event that both apps poll; each then starts
    # a task responsible for calling settle.
    RaidenAPI(app1.raiden).channel_close(token_address, app0.raiden.address)

    waiting.wait_for_settle(
        app0.raiden,
        registry_address,
        token_address,
        [channel_id],
        app0.raiden.alarm.wait_time,
    )

    assert_synched_channel_state(
        token_address,
        app0, deposit, [],
        app1, deposit, [],
    )

    settled_channel = get_channelstate(app0, app1, token_address)
    closed_block = settled_channel.close_transaction.finished_block_number
    settled_block = settled_channel.settle_transaction.finished_block_number
    assert closed_block
    assert settled_block

    wal_state_changes = app0.raiden.wal.storage.get_statechanges_by_identifier(
        from_identifier=0,
        to_identifier='latest',
    )

    expected_close = {
        'payment_network_identifier': registry_address,
        'token_address': token_address,
        'channel_identifier': channel_id,
        'closing_address': app1.raiden.address,
        'closed_block_number': closed_block,
    }
    assert must_contain_entry(wal_state_changes, ContractReceiveChannelClosed, expected_close)

    expected_settle = {
        'payment_network_identifier': registry_address,
        'token_address': token_address,
        'channel_identifier': channel_id,
        'settle_block_number': settled_block,
    }
    assert must_contain_entry(wal_state_changes, ContractReceiveChannelSettled, expected_settle)
def test_settle_is_automatically_called(raiden_network, token_addresses, deposit):
    """Settle is automatically called by one of the nodes."""
    app0, app1 = raiden_network
    registry_address = app0.raiden.default_registry.address
    token_address = token_addresses[0]
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        views.state_from_app(app0),
        app0.raiden.default_registry.address,
        token_address,
    )
    token_network = views.get_token_network_by_identifier(
        views.state_from_app(app0),
        token_network_identifier,
    )

    channel_identifier = get_channelstate(app0, app1, token_network_identifier).identifier

    assert channel_identifier in token_network.partneraddresses_to_channels[
        app1.raiden.address]

    # A ChannelClose event will be generated, this will be polled by both apps
    # and each must start a task for calling settle
    RaidenAPI(app1.raiden).channel_close(
        registry_address,
        token_address,
        app0.raiden.address,
    )

    waiting.wait_for_close(
        app0.raiden,
        registry_address,
        token_address,
        [channel_identifier],
        app0.raiden.alarm.sleep_time,
    )

    channel_state = views.get_channelstate_for(
        views.state_from_raiden(app0.raiden),
        registry_address,
        token_address,
        app1.raiden.address,
    )

    assert channel_state.close_transaction.finished_block_number

    waiting.wait_for_settle(
        app0.raiden,
        registry_address,
        token_address,
        [channel_identifier],
        app0.raiden.alarm.sleep_time,
    )

    # After settlement the channel must be gone from the token network state.
    token_network = views.get_token_network_by_identifier(
        views.state_from_app(app0),
        token_network_identifier,
    )

    assert channel_identifier not in token_network.partneraddresses_to_channels[
        app1.raiden.address]

    # Both the close and the settle must be recorded in app0's WAL.
    state_changes = app0.raiden.wal.storage.get_statechanges_by_identifier(
        from_identifier=0,
        to_identifier='latest',
    )

    assert must_contain_entry(state_changes, ContractReceiveChannelClosed, {
        'token_network_identifier': token_network_identifier,
        'channel_identifier': channel_identifier,
        'transaction_from': app1.raiden.address,
        'block_number': channel_state.close_transaction.finished_block_number,
    })

    assert must_contain_entry(state_changes, ContractReceiveChannelSettled, {
        'token_network_identifier': token_network_identifier,
        'channel_identifier': channel_identifier,
    })
def test_different_view_of_last_bp_during_unlock(
        raiden_chain,
        number_of_nodes,
        token_addresses,
        deposit,
        network_wait,
        retry_timeout,
        # UDP does not seem to retry messages until processed
        # https://github.com/raiden-network/raiden/issues/3185
        skip_if_not_matrix,
):
    """Test for https://github.com/raiden-network/raiden/issues/3196#issuecomment-449163888"""
    # Topology:
    #
    #  0 -> 1 -> 2
    #
    app0, app1, app2 = raiden_chain
    token_address = token_addresses[0]
    payment_network_identifier = app0.raiden.default_registry.address
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        views.state_from_app(app0),
        payment_network_identifier,
        token_address,
    )

    # make a transfer to test the path app0 -> app1 -> app2
    identifier_path = 1
    amount_path = 1
    mediated_transfer(
        app0,
        app2,
        token_network_identifier,
        amount_path,
        identifier_path,
        timeout=network_wait * number_of_nodes,
    )

    # drain the channel app1 -> app2
    identifier_drain = 2
    amount_drain = deposit * 8 // 10
    mediated_transfer(
        initiator_app=app1,
        target_app=app2,
        token_network_identifier=token_network_identifier,
        amount=amount_drain,
        identifier=identifier_drain,
        timeout=network_wait,
    )

    # wait for the nodes to sync
    gevent.sleep(0.2)

    assert_synced_channel_state(
        token_network_identifier,
        app0, deposit - amount_path, [],
        app1, deposit + amount_path, [],
    )
    assert_synced_channel_state(
        token_network_identifier,
        app1, deposit - amount_path - amount_drain, [],
        app2, deposit + amount_path + amount_drain, [],
    )

    # app0 -> app1 -> app2 is the only available path, but the channel app1 ->
    # app2 doesn't have capacity, so a refund will be sent on app1 -> app0
    identifier_refund = 3
    amount_refund = 50
    async_result = app0.raiden.mediated_transfer_async(
        token_network_identifier,
        amount_refund,
        app2.raiden.address,
        identifier_refund,
    )
    assert async_result.wait() is False, 'there is no path with capacity, the transfer must fail'

    gevent.sleep(0.2)

    # A lock structure with the correct amount
    send_locked = raiden_events_must_contain_entry(
        app0.raiden,
        SendLockedTransfer,
        {'transfer': {'lock': {'amount': amount_refund}}},
    )
    assert send_locked
    secrethash = send_locked.transfer.lock.secrethash

    send_refund = raiden_events_must_contain_entry(app1.raiden, SendRefundTransfer, {})
    assert send_refund

    lock = send_locked.transfer.lock
    refund_lock = send_refund.transfer.lock
    assert lock.amount == refund_lock.amount
    assert lock.secrethash
    assert lock.expiration
    assert lock.secrethash == refund_lock.secrethash

    # Both channels have the amount locked because of the refund message
    assert_synced_channel_state(
        token_network_identifier,
        app0, deposit - amount_path, [lockstate_from_lock(lock)],
        app1, deposit + amount_path, [lockstate_from_lock(refund_lock)],
    )
    assert_synced_channel_state(
        token_network_identifier,
        app1, deposit - amount_path - amount_drain, [],
        app2, deposit + amount_path + amount_drain, [],
    )

    # Additional checks for LockExpired causing nonce mismatch after refund transfer:
    # https://github.com/raiden-network/raiden/issues/3146#issuecomment-447378046
    # At this point make sure that the initiator has not deleted the payment task
    assert secrethash in state_from_raiden(app0.raiden).payment_mapping.secrethashes_to_task

    with dont_handle_node_change_network_state():
        # now app1 goes offline
        app1.raiden.stop()
        app1.raiden.get()
        assert not app1.raiden

        # Wait for lock expiration so that app0 sends a LockExpired
        wait_for_block(
            raiden=app0.raiden,
            block_number=lock.expiration + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS * 2 + 1,
            retry_timeout=retry_timeout,
        )

        # make sure that app0 sent a lock expired message for the secrethash
        wait_for_raiden_event(
            app0.raiden,
            SendLockExpired,
            {'secrethash': secrethash},
            retry_timeout,
        )

        # now app0 closes the channel
        RaidenAPI(app0.raiden).channel_close(
            registry_address=payment_network_identifier,
            token_address=token_address,
            partner_address=app1.raiden.address,
        )

    # and now app1 comes back online
    app1.raiden.start()

    channel_identifier = get_channelstate(app0, app1, token_network_identifier).identifier

    # and we wait for settlement
    wait_for_settle(
        raiden=app0.raiden,
        payment_network_id=payment_network_identifier,
        token_address=token_address,
        channel_ids=[channel_identifier],
        retry_timeout=app0.raiden.alarm.sleep_time,
    )

    with gevent.Timeout(10):
        wait_for_state_change(
            app0.raiden,
            ContractReceiveChannelBatchUnlock,
            {},  # {'participant': secrethash},
            retry_timeout,
        )
def _run_smoketest(): print_step('Starting Raiden') # invoke the raiden app app = run_app(**args) raiden_api = RaidenAPI(app.raiden) rest_api = RestAPI(raiden_api) api_server = APIServer(rest_api) (api_host, api_port) = split_endpoint(args['api_address']) api_server.start(api_host, api_port) raiden_api.channel_open( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], to_canonical_address(token.contract.address), to_canonical_address(TEST_PARTNER_ADDRESS), None, None, ) raiden_api.set_total_channel_deposit( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], to_canonical_address(token.contract.address), to_canonical_address(TEST_PARTNER_ADDRESS), TEST_DEPOSIT_AMOUNT, ) smoketest_config['contracts'][ 'registry_address'] = to_checksum_address( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], ) smoketest_config['contracts'][ 'secret_registry_address'] = to_checksum_address( contract_addresses[CONTRACT_SECRET_REGISTRY], ) smoketest_config['contracts'][ 'discovery_address'] = to_checksum_address( contract_addresses[CONTRACT_ENDPOINT_REGISTRY], ) smoketest_config['contracts']['token_address'] = to_checksum_address( token.contract.address, ) success = False try: print_step('Running smoketest') error = run_smoketests(app.raiden, smoketest_config, debug=debug) if error is not None: append_report('Smoketest assertion error', error) else: success = True finally: app.stop() ethereum.send_signal(2) err, out = ethereum.communicate() append_report('Ethereum init stdout', ethereum_config['init_log_out'].decode('utf-8')) append_report('Ethereum init stderr', ethereum_config['init_log_err'].decode('utf-8')) append_report('Ethereum stdout', out) append_report('Ethereum stderr', err) append_report('Smoketest configuration', json.dumps(smoketest_config)) if success: print_step( f'Smoketest successful, report was written to {report_file}') else: print_step( f'Smoketest had errors, report was written to {report_file}', error=True) return success
def test_recovery_blockchain_events(
        raiden_network,
        token_addresses,
        network_wait,
        skip_if_not_udp,  # pylint: disable=unused-argument
):
    """ Close one of the two raiden apps that have a channel between them,
    have the counterparty close the channel and
    then make sure the restarted app sees the change
    """
    app0, app1 = raiden_network
    token_address = token_addresses[0]

    app0.raiden.stop()

    # Rebind the same UDP host/port so the restarted node keeps its network
    # identity.
    host_port = (
        app0.raiden.config["transport"]["udp"]["host"],
        app0.raiden.config["transport"]["udp"]["port"],
    )
    socket = server._udp_socket(host_port)

    new_transport = UDPTransport(
        app0.raiden.address,
        app0.discovery,
        socket,
        app0.raiden.transport.throttle_policy,
        app0.raiden.config["transport"]["udp"],
    )

    # The counterparty closes the channel while app0 is offline.
    app1_api = RaidenAPI(app1.raiden)
    app1_api.channel_close(
        registry_address=app0.raiden.default_registry.address,
        token_address=token_address,
        partner_address=app0.raiden.address,
    )

    app0.stop()

    gevent.sleep(1)

    raiden_event_handler = RaidenEventHandler()
    message_handler = MessageHandler()

    # Re-create the app with the same persistent state but the new transport.
    app0_restart = App(
        config=app0.config,
        chain=app0.raiden.chain,
        query_start_block=0,
        default_registry=app0.raiden.default_registry,
        default_secret_registry=app0.raiden.default_secret_registry,
        default_service_registry=app0.raiden.default_service_registry,
        transport=new_transport,
        raiden_event_handler=raiden_event_handler,
        message_handler=message_handler,
        discovery=app0.raiden.discovery,
    )
    del app0  # from here on the app0_restart should be used

    app0_restart.raiden.start()

    # wait for the nodes' healthcheck to update the network statuses
    waiting.wait_for_healthy(app0_restart.raiden, app1.raiden.address, network_wait)
    waiting.wait_for_healthy(app1.raiden, app0_restart.raiden.address, network_wait)

    # The restarted node must have picked up the on-chain close that happened
    # while it was offline.
    restarted_state_changes = app0_restart.raiden.wal.storage.get_statechanges_by_identifier(
        0,
        "latest")

    assert search_for_item(restarted_state_changes, ContractReceiveChannelClosed, {})
class ConnectionManager:
    """The ConnectionManager provides a high level abstraction for connecting
    to a Token network.

    Note:
        It is initialized with 0 funds; a connection to the token network
        will be only established _after_ calling `connect(funds)`
    """
    # XXX Hack: for bootstrapping, the first node on a network opens a channel
    # with this address to become visible.
    BOOTSTRAP_ADDR_HEX = b'2' * 40
    BOOTSTRAP_ADDR = unhexlify(BOOTSTRAP_ADDR_HEX)

    def __init__(self, raiden, token_network_identifier):
        chain_state = views.state_from_raiden(raiden)
        token_network_state = views.get_token_network_by_identifier(
            chain_state,
            token_network_identifier,
        )
        token_network_registry = views.get_token_network_registry_by_token_network_identifier(
            chain_state,
            token_network_identifier,
        )

        # TODO:
        # - Add timeout for transaction polling, used to overwrite the RaidenAPI
        # defaults
        # - Add a proper selection strategy (#576)
        self.funds = 0
        self.initial_channel_target = 0
        self.joinable_funds_target = 0

        self.raiden = raiden
        self.registry_address = token_network_registry.address
        self.token_network_identifier = token_network_identifier
        self.token_address = token_network_state.token_address

        self.lock = Semaphore()  #: protects self.funds and self.initial_channel_target
        self.api = RaidenAPI(raiden)

    def connect(
            self,
            funds: typing.TokenAmount,
            initial_channel_target: int = 3,
            joinable_funds_target: float = 0.4,
    ):
        """Connect to the network.

        Subsequent calls to `connect` are allowed, but will only affect the
        spendable funds and the connection strategy parameters for the
        future. `connect` will not close any channels.

        Note: the ConnectionManager does not discriminate manually opened
        channels from automatically opened ones. If the user manually opened
        channels, those deposit amounts will affect the funding per channel
        and the number of new channels opened.

        Args:
            funds: Target amount of tokens spendable to join the network.
            initial_channel_target: Target number of channels to open.
            joinable_funds_target: Amount of funds not initially assigned.

        Raises:
            InvalidAmount: If funds is not positive, or exceeds the node's
                token balance.
        """
        token = self.raiden.chain.token(self.token_address)
        token_balance = token.balance_of(self.raiden.address)

        if token_balance < funds:
            raise InvalidAmount(
                f'Insufficient balance for token {pex(self.token_address)}',
            )

        if funds <= 0:
            raise InvalidAmount(
                'The funds to use in the connection need to be a positive integer',
            )

        with self.lock:
            self.funds = funds
            self.initial_channel_target = initial_channel_target
            self.joinable_funds_target = joinable_funds_target

            log_open_channels(self.raiden, self.registry_address, self.token_address, funds)

            qty_network_channels = views.count_token_network_channels(
                views.state_from_raiden(self.raiden),
                self.registry_address,
                self.token_address,
            )

            if not qty_network_channels:
                log.debug('bootstrapping token network.')
                # make ourselves visible
                self.api.channel_open(
                    self.registry_address,
                    self.token_address,
                    self.BOOTSTRAP_ADDR,
                )
            else:
                self._open_channels()

    def leave_async(self, registry_address=None):
        """ Async version of `leave()`.

        Args:
            registry_address: Payment network to leave; defaults to the
                registry this manager was created for.

        Returns:
            AsyncResult holding the result of `leave()`.
        """
        # Fix: `leave()` requires a registry_address argument; previously the
        # greenlet was spawned without it, so every call failed with a
        # TypeError inside the greenlet.
        if registry_address is None:
            registry_address = self.registry_address
        leave_result = AsyncResult()
        gevent.spawn(self.leave, registry_address).link(leave_result)
        return leave_result

    def leave(self, registry_address):
        """ Leave the token network.

        This implies closing all channels and waiting for all channels to be
        settled.

        Returns:
            The list of channel states that were closed.
        """
        with self.lock:
            # Setting the target to 0 flags the manager as "leaving"
            # (see `_leaving_state`), preventing new channels from opening.
            self.initial_channel_target = 0

            channels_to_close = views.get_channelstate_open(
                chain_state=views.state_from_raiden(self.raiden),
                payment_network_id=registry_address,
                token_address=self.token_address,
            )

            partner_addresses = [
                channel_state.partner_state.address
                for channel_state in channels_to_close
            ]
            self.api.channel_batch_close(
                registry_address,
                self.token_address,
                partner_addresses,
            )

            channel_ids = [
                channel_state.identifier
                for channel_state in channels_to_close
            ]

            waiting.wait_for_settle(
                self.raiden,
                registry_address,
                self.token_address,
                channel_ids,
                self.raiden.alarm.sleep_time,
            )

        return channels_to_close

    def join_channel(self, partner_address, partner_deposit):
        """Will be called, when we were selected as channel partner by another
        node. It will fund the channel with up to the partners deposit, but
        not more than remaining funds or the initial funding per channel.

        If the connection manager has no funds, this is a noop.
        """
        with self.lock:
            joining_funds = min(
                partner_deposit,
                self._funds_remaining,
                self._initial_funding_per_partner,
            )
            if joining_funds <= 0 or self._leaving_state:
                return

            try:
                self.api.set_total_channel_deposit(
                    self.registry_address,
                    self.token_address,
                    partner_address,
                    joining_funds,
                )
            except RaidenRecoverableError:
                log.exception('connection manager join: channel not in opened state')
            else:
                log.debug(
                    'joined a channel!',
                    funds=joining_funds,
                    me=pex(self.raiden.address),
                    partner=pex(partner_address),
                )

    def retry_connect(self):
        """Will be called when new channels in the token network are detected.
        If the minimum number of channels was not yet established, it will try
        to open new channels.

        If the connection manager has no funds, this is a noop.
        """
        with self.lock:
            if self._funds_remaining > 0 and not self._leaving_state:
                self._open_channels()

    def _find_new_partners(self):
        """ Search the token network for potential channel partners.

        Returns:
            A shuffled list of participant addresses we do not yet have an
            open channel with (excluding ourselves and the bootstrap address).
        """
        open_channels = views.get_channelstate_open(
            chain_state=views.state_from_raiden(self.raiden),
            payment_network_id=self.registry_address,
            token_address=self.token_address,
        )
        known = set(channel_state.partner_state.address for channel_state in open_channels)
        known.add(self.BOOTSTRAP_ADDR)
        known.add(self.raiden.address)

        participants_addresses = views.get_participants_addresses(
            views.state_from_raiden(self.raiden),
            self.registry_address,
            self.token_address,
        )

        available = list(participants_addresses - known)
        # Shuffle to avoid every node picking the same partners.
        shuffle(available)
        new_partners = available

        log.debug('found {} partners'.format(len(available)))

        return new_partners

    def _join_partner(self, partner: Address):
        """ Ensure a channel exists with partner and is funded in our side """
        try:
            self.api.channel_open(
                self.registry_address,
                self.token_address,
                partner,
            )
        except DuplicatedChannelError:
            # If channel already exists (either because partner created it,
            # or it's nonfunded channel), continue to ensure it's funded
            pass

        try:
            self.api.set_total_channel_deposit(
                self.registry_address,
                self.token_address,
                partner,
                self._initial_funding_per_partner,
            )
        except TransactionThrew:
            log.exception('connection manager: deposit failed')
        except RaidenRecoverableError:
            log.exception('connection manager: channel not in opened state')
        except InsufficientFunds as e:
            log.error(f'connection manager: {str(e)}')

    def _open_channels(self) -> bool:
        """ Open channels until there are `self.initial_channel_target`
        channels open. Do nothing if there are enough channels open already.

        Note:
            - This method must be called with the lock held.
        Return:
            - False if no channels could be opened
        """
        open_channels = views.get_channelstate_open(
            chain_state=views.state_from_raiden(self.raiden),
            payment_network_id=self.registry_address,
            token_address=self.token_address,
        )

        # don't consider the bootstrap channel
        open_channels = [
            channel_state
            for channel_state in open_channels
            if channel_state.partner_state.address != self.BOOTSTRAP_ADDR
        ]
        funded_channels = [
            channel_state
            for channel_state in open_channels
            if channel_state.our_state.contract_balance >= self._initial_funding_per_partner
        ]
        nonfunded_channels = [
            channel_state
            for channel_state in open_channels
            if channel_state not in funded_channels
        ]

        possible_new_partners = self._find_new_partners()

        # if we already met our target, break
        if len(funded_channels) >= self.initial_channel_target:
            return False

        # if we didn't, but there's no nonfunded channels and no available partners
        # it means the network is smaller than our target, so we should also break
        #
        # Fix: `possible_new_partners` is a list; the previous comparison
        # `possible_new_partners == 0` was always False (list vs int), which
        # made this guard (and a redundant unconditional one before the
        # target check) dead code.
        if not nonfunded_channels and not possible_new_partners:
            return False

        n_to_join = self.initial_channel_target - len(funded_channels)
        nonfunded_partners = [
            channel_state.partner_state.address
            for channel_state in nonfunded_channels
        ]
        # first, fund nonfunded channels, then open and fund with possible_new_partners,
        # until initial_channel_target of funded channels is met
        join_partners = (nonfunded_partners + possible_new_partners)[:n_to_join]

        greenlets = [
            gevent.spawn(self._join_partner, partner)
            for partner in join_partners
        ]
        gevent.joinall(greenlets, raise_error=True)
        return True

    @property
    def _initial_funding_per_partner(self) -> int:
        """The calculated funding per partner depending on configuration and
        overall funding of the ConnectionManager.

        Note:
            - This attribute must be accessed with the lock held.
        """
        if self.initial_channel_target:
            return int(
                self.funds
                * (1 - self.joinable_funds_target)
                / self.initial_channel_target,
            )

        return 0

    @property
    def _funds_remaining(self) -> int:
        """The remaining funds after subtracting the already deposited amounts.

        Note:
            - This attribute must be accessed with the lock held.
        """
        if self.funds > 0:
            token = self.raiden.chain.token(self.token_address)
            token_balance = token.balance_of(self.raiden.address)
            sum_deposits = views.get_our_capacity_for_token_network(
                views.state_from_raiden(self.raiden),
                self.registry_address,
                self.token_address,
            )

            return min(self.funds - sum_deposits, token_balance)

        return 0

    @property
    def _leaving_state(self) -> bool:
        """True if the node is leaving the token network.

        Note:
            - This attribute must be accessed with the lock held.
        """
        return self.initial_channel_target < 1

    def __repr__(self) -> str:
        open_channels = views.get_channelstate_open(
            chain_state=views.state_from_raiden(self.raiden),
            payment_network_id=self.registry_address,
            token_address=self.token_address,
        )
        return (
            f'{self.__class__.__name__}(target={self.initial_channel_target} '
            f'channels={len(open_channels)}:{open_channels!r})'
        )
def test_echo_node_lottery(token_addresses, raiden_chain):
    """Exercise the EchoNode lottery: six participants enter the pool, one
    duplicated transfer is ignored, one transfer queries the pool size and a
    final entry fills the pool, after which a winning payout is expected.
    """
    app0, app1, app2, app3, echo_app, app4, app5, app6 = raiden_chain
    address_to_app = {app.raiden.address: app for app in raiden_chain}
    token_address = token_addresses[0]
    echo_api = RaidenAPI(echo_app.raiden)

    echo_node = EchoNode(echo_api, token_address)
    echo_node.ready.wait(timeout=30)
    assert echo_node.ready.is_set()

    expected = list()

    # Let 6 participants enter the pool
    amount = 7
    for num, app in enumerate([app0, app1, app2, app3, app4, app5]):
        # identifiers 10, 100, ..., 10**6 — one distinct power of ten per app
        transfer_event = RaidenAPI(app.raiden).transfer_async(
            token_address,
            amount,
            echo_app.raiden.address,
            10 ** (num + 1),
        )
        transfer_event.wait(timeout=20)
        expected.append(amount)

    # test duplicated identifier + amount is ignored
    transfer_event = RaidenAPI(app5.raiden).transfer_async(
        token_address,
        amount,  # same amount as before
        echo_app.raiden.address,
        10 ** 6,  # app5 used this identifier before
    ).wait(timeout=20)

    # test pool size querying
    pool_query_identifier = 77  # unused identifier different from previous one
    transfer_event = RaidenAPI(app5.raiden).transfer_async(
        token_address,
        amount,
        echo_app.raiden.address,
        pool_query_identifier,
    ).wait(timeout=20)
    expected.append(amount)

    # fill the pool
    transfer_event = RaidenAPI(app6.raiden).transfer_async(
        token_address,
        amount,
        echo_app.raiden.address,
        10 ** 7,
    ).wait(timeout=20)
    expected.append(amount)

    # Block until the echo node has processed every expected transfer.
    while echo_node.num_handled_transfers < len(expected):
        gevent.sleep(.5)

    received = {}
    # Check that payout was generated and pool_size_query answered
    for handled_transfer in echo_node.seen_transfers:
        app = address_to_app[handled_transfer['initiator']]
        events = get_channel_events_for_token(app, token_address, 0)
        for event in events:
            if event['_event_type'] == 'EventTransferReceivedSuccess':
                # key by repr to deduplicate identical event payloads
                received[repr(event)] = event

    # exactly two transfers back: the pool-size answer and the payout
    assert len(received) == 2
    received = sorted(received.values(), key=lambda transfer: transfer['amount'])

    pool_query = received[0]
    assert pool_query['amount'] == 6
    assert pool_query['identifier'] == pool_query_identifier + 6

    winning_transfer = received[1]
    assert winning_transfer['initiator'] == echo_app.raiden.address
    assert winning_transfer['amount'] == 49
    # payout identifier encodes the winner's entry identifier
    assert (winning_transfer['identifier'] - 49) % 10 == 0

    echo_node.stop()
def test_token_registered_race(raiden_chain, token_amount):
    """Test recreating the scenario described on issue:
    https://github.com/raiden-network/raiden/issues/784

    A node that did not register a token itself must still learn about the
    registration the next time blockchain events are polled.
    """
    app0, app1 = raiden_chain

    api0 = RaidenAPI(app0.raiden)
    api1 = RaidenAPI(app1.raiden)

    # Recreate the race condition by making sure the non-registering app won't
    # register at all by watching for the TokenAdded blockchain event.
    app1.raiden.alarm.remove_callback(app1.raiden.poll_blockchain_events)

    token_address = app1.raiden.chain.deploy_contract(
        contract_name='HumanStandardToken',
        contract_path=get_contract_path('HumanStandardToken.sol'),
        constructor_parameters=(token_amount, 'raiden', 2, 'Rd'),
    )

    # give the deploy transaction time to be processed
    gevent.sleep(1)

    # neither node knows about the token before registration
    assert token_address not in api0.get_tokens_list()
    assert token_address not in api1.get_tokens_list()

    api0.token_network_register(token_address)
    gevent.sleep(1)

    # only the registering node sees the token; app1 has polling disabled
    assert token_address in api0.get_tokens_list()
    assert token_address not in api1.get_tokens_list()

    # The next time when the event is polled, the token is registered
    app1.raiden.poll_blockchain_events()

    assert token_address in api1.get_tokens_list()
def test_settled_lock(token_addresses, raiden_network, deposit):
    """ Any transfer following a secret revealed must update the locksroot,
    so that an attacker cannot reuse a secret to double claim a lock.
    """
    app0, app1 = raiden_network
    registry_address = app0.raiden.default_registry.address
    token_address = token_addresses[0]
    amount = 30
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        views.state_from_app(app0),
        app0.raiden.default_registry.address,
        token_address,
    )

    address0 = app0.raiden.address
    address1 = app1.raiden.address

    deposit0 = deposit
    deposit1 = deposit

    token_proxy = app0.raiden.chain.token(token_address)
    initial_balance0 = token_proxy.balance_of(address0)
    initial_balance1 = token_proxy.balance_of(address1)

    # Using a pending mediated transfer because this allows us to compute the
    # merkle proof
    identifier = 1
    secret = pending_mediated_transfer(
        raiden_network,
        token_network_identifier,
        amount,
        identifier,
    )

    # Save the merkle tree leaves from the pending transfer, used to test the unlock
    channelstate_0_1 = get_channelstate(app0, app1, token_network_identifier)
    batch_unlock = channel.get_batch_unlock(channelstate_0_1.our_state)
    assert batch_unlock

    claim_lock(raiden_network, identifier, token_network_identifier, secret)

    # Make a new transfer
    direct_transfer(app0, app1, token_network_identifier, amount, identifier=1)

    # Close on-chain and wait for the channel to be settled before attempting
    # the (stale) unlock.
    RaidenAPI(app1.raiden).channel_close(
        registry_address,
        token_address,
        app0.raiden.address,
    )

    waiting.wait_for_settle(
        app1.raiden,
        app1.raiden.default_registry.address,
        token_address,
        [channelstate_0_1.identifier],
        app1.raiden.alarm.sleep_time,
    )

    netting_channel = app1.raiden.chain.payment_channel(
        token_network_identifier,
        channelstate_0_1.identifier,
    )

    # The direct transfer locksroot must not contain the unlocked lock, the
    # unlock must fail.
    with pytest.raises(Exception):
        netting_channel.unlock(
            channelstate_0_1.partner_state.address,
            batch_unlock,
        )

    # amount moved twice (once mediated+claimed, once direct), both app0 -> app1
    expected_balance0 = initial_balance0 + deposit0 - amount * 2
    expected_balance1 = initial_balance1 + deposit1 + amount * 2

    assert token_proxy.balance_of(address0) == expected_balance0
    assert token_proxy.balance_of(address1) == expected_balance1
def test_recovery_blockchain_events(
        raiden_network,
        number_of_nodes,
        deposit,
        token_addresses,
        network_wait,
        skip_if_not_udp,
):
    """ Close one of the two raiden apps that have a channel between them,
    have the counterparty close the channel and then make sure the
    restarted app sees the change
    """
    app0, app1 = raiden_network
    token_address = token_addresses[0]

    app0.raiden.stop()

    # Recreate app0's UDP transport on the same host/port so the restarted
    # node is reachable at the same endpoint.
    host_port = (
        app0.raiden.config['transport']['udp']['host'],
        app0.raiden.config['transport']['udp']['port'],
    )
    socket = server._udp_socket(host_port)

    new_transport = UDPTransport(
        app0.raiden.address,
        app0.discovery,
        socket,
        app0.raiden.transport.throttle_policy,
        app0.raiden.config['transport']['udp'],
    )

    # While app0 is offline the counterparty closes the channel on-chain.
    app1_api = RaidenAPI(app1.raiden)
    app1_api.channel_close(
        registry_address=app0.raiden.default_registry.address,
        token_address=token_address,
        partner_address=app0.raiden.address,
    )

    app0.stop()

    # Fix: removed a redundant function-local `import gevent`; the module
    # already uses `gevent` at module scope.
    # Give the close transaction time to be processed before restarting.
    gevent.sleep(1)

    raiden_event_handler = RaidenEventHandler()
    message_handler = MessageHandler()

    # Restart app0 from the same configuration, chain and registries (and
    # therefore the same WAL), only the transport is new.
    app0_restart = App(
        config=app0.config,
        chain=app0.raiden.chain,
        query_start_block=0,
        default_registry=app0.raiden.default_registry,
        default_secret_registry=app0.raiden.default_secret_registry,
        transport=new_transport,
        raiden_event_handler=raiden_event_handler,
        message_handler=message_handler,
        discovery=app0.raiden.discovery,
    )
    del app0  # from here on the app0_restart should be used
    app0_restart.raiden.start()

    # wait for the nodes' healthcheck to update the network statuses
    waiting.wait_for_healthy(
        app0_restart.raiden,
        app1.raiden.address,
        network_wait,
    )
    waiting.wait_for_healthy(
        app1.raiden,
        app0_restart.raiden.address,
        network_wait,
    )

    # The restarted node must have picked up the on-chain channel close that
    # happened while it was down.
    restarted_state_changes = app0_restart.raiden.wal.storage.get_statechanges_by_identifier(
        0,
        'latest',
    )
    assert must_contain_entry(restarted_state_changes, ContractReceiveChannelClosed, {})
def test_automatic_dispute(raiden_network, deposit, token_addresses):
    """After transfers in both directions, close the channel from one side and
    verify that, once settled, both on-chain balances reflect *all* transfers
    — i.e. each side used the counterparty's balance proof with the largest
    transferred amount.
    """
    app0, app1 = raiden_network
    registry_address = app0.raiden.default_registry.address
    token_address = token_addresses[0]
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        views.state_from_app(app0),
        app0.raiden.default_registry.address,
        token_address,
    )
    channel0 = get_channelstate(app0, app1, token_network_identifier)
    token_proxy = app0.raiden.chain.token(channel0.token_address)
    initial_balance0 = token_proxy.balance_of(app0.raiden.address)
    initial_balance1 = token_proxy.balance_of(app1.raiden.address)

    # Three transfers: two from app0 to app1, one back from app1 to app0.
    amount0_1 = 10
    direct_transfer(
        app0,
        app1,
        token_network_identifier,
        amount0_1,
    )

    amount1_1 = 50
    direct_transfer(
        app1,
        app0,
        token_network_identifier,
        amount1_1,
    )

    amount0_2 = 60
    direct_transfer(
        app0,
        app1,
        token_network_identifier,
        amount0_2,
    )

    # Alice can only provide one of Bob's transfer, so she is incentivized to
    # use the one with the largest transferred_amount.
    RaidenAPI(app0.raiden).channel_close(
        registry_address,
        token_address,
        app1.raiden.address,
    )

    # Bob needs to provide a transfer otherwise its netted balance will be
    # wrong, so he is incentivised to use Alice's transfer with the largest
    # transferred_amount.
    #
    # This is done automatically
    # channel1.external_state.update_transfer(
    #     alice_second_transfer,
    # )

    waiting.wait_for_settle(
        app0.raiden,
        registry_address,
        token_address,
        [channel0.identifier],
        app0.raiden.alarm.sleep_time,
    )

    # check that the channel is properly settled and that Bob's client
    # automatically called updateTransfer() to reflect the actual transactions
    assert token_proxy.balance_of(token_network_identifier) == 0

    total0 = amount0_1 + amount0_2
    total1 = amount1_1
    expected_balance0 = initial_balance0 + deposit - total0 + total1
    expected_balance1 = initial_balance1 + deposit + total0 - total1
    assert token_proxy.balance_of(app0.raiden.address) == expected_balance0
    assert token_proxy.balance_of(app1.raiden.address) == expected_balance1
def test_token_registered_race(raiden_chain, token_amount, retry_timeout):
    """If a token is registered it must appear on the token list.

    If two nodes register the same token one of the transactions will fail. The
    node that receives an error for "already registered token" must see the
    token in the token list. Issue: #784
    """
    app0, app1 = raiden_chain

    api0 = RaidenAPI(app0.raiden)
    api1 = RaidenAPI(app1.raiden)

    # Recreate the race condition by making sure the non-registering app won't
    # register at all by watching for the TokenAdded blockchain event.
    event_listeners = app1.raiden.blockchain_events.event_listeners
    app1.raiden.blockchain_events.event_listeners = list()

    token_address = deploy_contract_web3(
        CONTRACT_HUMAN_STANDARD_TOKEN,
        app1.raiden.chain.client,
        num_confirmations=None,
        constructor_arguments=(
            token_amount,
            2,
            'raiden',
            'Rd',
        ),
    )

    # give the deploy transaction time to be processed
    gevent.sleep(1)

    registry_address = app0.raiden.default_registry.address

    # neither node knows about the token before registration
    assert token_address not in api0.get_tokens_list(registry_address)
    assert token_address not in api1.get_tokens_list(registry_address)

    api0.token_network_register(registry_address, token_address)
    gevent.sleep(1)

    # only the registering node sees the token; app1 has its listeners removed
    assert token_address in api0.get_tokens_list(registry_address)
    assert token_address not in api1.get_tokens_list(registry_address)

    # The next time when the event is polled, the token is registered
    app1.raiden.blockchain_events.event_listeners = event_listeners
    waiting.wait_for_block(
        app1.raiden,
        app1.raiden.get_block_number() + 1,
        retry_timeout,
    )

    assert token_address in api1.get_tokens_list(registry_address)
def _run_app(self):
    """Start the Raiden app and its services (REST API, console, version and
    gas-reserve checkers), then block until a termination signal arrives.

    Returns:
        The started App instance, after shutdown hooks have run.

    Side effects:
        Calls sys.exit(1) when the ethereum node is unreachable or the API
        port is already in use.
    """
    from raiden.ui.console import Console
    from raiden.api.python import RaidenAPI

    # this catches exceptions raised when waiting for the stalecheck to complete
    try:
        app_ = run_app(**self._options)
    except EthNodeCommunicationError:
        print(
            '\n'
            'Could not contact the ethereum node through JSON-RPC.\n'
            'Please make sure that JSON-RPC is enabled for these interfaces:\n'
            '\n'
            ' eth_*, net_*, web3_*\n'
            '\n'
            'geth: https://github.com/ethereum/go-ethereum/wiki/Management-APIs\n',
        )
        sys.exit(1)

    # build the CORS whitelist from the (possibly comma-separated) option
    domain_list = []
    if self._options['rpccorsdomain']:
        if ',' in self._options['rpccorsdomain']:
            for domain in self._options['rpccorsdomain'].split(','):
                domain_list.append(str(domain))
        else:
            domain_list.append(str(self._options['rpccorsdomain']))

    self._raiden_api = RaidenAPI(app_.raiden)

    api_server = None
    if self._options['rpc']:
        rest_api = RestAPI(self._raiden_api)
        api_server = APIServer(
            rest_api,
            cors_domain_list=domain_list,
            web_ui=self._options['web_ui'],
            eth_rpc_endpoint=self._options['eth_rpc_endpoint'],
        )
        (api_host, api_port) = split_endpoint(self._options['api_address'])

        try:
            api_server.start(api_host, api_port)
        except APIServerPortInUseError:
            print(
                'ERROR: API Address %s:%s is in use. '
                'Use --api-address <host:port> to specify port to listen on.' % (
                    api_host,
                    api_port,
                ),
            )
            sys.exit(1)

        print(
            'The Raiden API RPC server is now running at http://{}:{}/.\n\n'
            'See the Raiden documentation for all available endpoints at\n'
            'http://raiden-network.readthedocs.io/en/stable/rest_api.html'.format(
                api_host,
                api_port,
            ),
        )

    if self._options['console']:
        console = Console(app_)
        console.start()

    # spawn a greenlet to handle the version checking
    gevent.spawn(check_version)

    # spawn a greenlet to handle the gas reserve check
    gevent.spawn(check_gas_reserve, app_.raiden)

    self._startup_hook()

    # wait for interrupt
    event = RaidenGreenletEvent()
    gevent.signal(signal.SIGQUIT, event.set)
    gevent.signal(signal.SIGTERM, event.set)
    gevent.signal(signal.SIGINT, event.set)

    try:
        event.wait()
        print('Signal received. Shutting down ...')
    except RaidenError as ex:
        click.secho(f'FATAL: {ex}', fg='red')
    except Exception as ex:
        # Unexpected failure: persist the traceback to a temp file so the
        # user can report it, and show where it was written.
        with NamedTemporaryFile(
            'w',
            prefix=f'raiden-exception-{datetime.utcnow():%Y-%m-%dT%H-%M}',
            suffix='.txt',
            delete=False,
        ) as traceback_file:
            traceback.print_exc(file=traceback_file)
            click.secho(
                f'FATAL: An unexpected exception occured. '
                f'A traceback has been written to {traceback_file.name}\n'
                f'{ex}',
                fg='red',
            )
    finally:
        # always run shutdown hooks and stop the API server, even on error
        self._shutdown_hook()

        if api_server:
            api_server.stop()

    return app_
def _start_services(self):
    """Start the Raiden app and all companion services as linked tasks
    (REST API server, console, version and gas-reserve checkers), then block
    until a termination signal arrives or any task exits.

    Returns:
        The started App instance, after all tasks have been stopped.

    Side effects:
        Calls sys.exit(1) on ethereum-node communication failures or when
        the API port is already in use.
    """
    from raiden.ui.console import Console
    from raiden.api.python import RaidenAPI

    # merge any extra_config option into a copy of the default config
    config = deepcopy(App.DEFAULT_CONFIG)
    if self._options.get('extra_config', dict()):
        merge_dict(config, self._options['extra_config'])
        del self._options['extra_config']
    self._options['config'] = config

    if self._options['showconfig']:
        print('Configuration Dump:')
        dump_config(config)
        dump_cmd_options(self._options)
        dump_module('settings', settings)
        dump_module('constants', constants)

    # this catches exceptions raised when waiting for the stalecheck to complete
    try:
        app_ = run_app(**self._options)
    except (EthNodeCommunicationError, RequestsConnectionError):
        print(ETHEREUM_NODE_COMMUNICATION_ERROR)
        sys.exit(1)
    except EthNodeInterfaceError as e:
        click.secho(str(e), fg='red')
        sys.exit(1)

    tasks = [app_.raiden]  # RaidenService takes care of Transport and AlarmTask

    # build the CORS whitelist from the (possibly comma-separated) option
    domain_list = []
    if self._options['rpccorsdomain']:
        if ',' in self._options['rpccorsdomain']:
            for domain in self._options['rpccorsdomain'].split(','):
                domain_list.append(str(domain))
        else:
            domain_list.append(str(self._options['rpccorsdomain']))

    self._raiden_api = RaidenAPI(app_.raiden)

    if self._options['rpc']:
        rest_api = RestAPI(self._raiden_api)
        api_server = APIServer(
            rest_api,
            cors_domain_list=domain_list,
            web_ui=self._options['web_ui'],
            eth_rpc_endpoint=self._options['eth_rpc_endpoint'],
        )
        (api_host, api_port) = split_endpoint(self._options['api_address'])

        try:
            api_server.start(api_host, api_port)
        except APIServerPortInUseError:
            click.secho(
                f'ERROR: API Address {api_host}:{api_port} is in use. '
                f'Use --api-address <host:port> to specify a different port.',
                fg='red',
            )
            sys.exit(1)

        print(
            'The Raiden API RPC server is now running at http://{}:{}/.\n\n'
            'See the Raiden documentation for all available endpoints at\n'
            'http://raiden-network.readthedocs.io/en/stable/rest_api.html'.format(
                api_host,
                api_port,
            ),
        )

        tasks.append(api_server)

    if self._options['console']:
        console = Console(app_)
        console.start()
        tasks.append(console)

    # spawn a greenlet to handle the version checking
    version = get_system_spec()['raiden']
    if version is not None:
        tasks.append(gevent.spawn(check_version, version))

    # spawn a greenlet to handle the gas reserve check
    tasks.append(gevent.spawn(check_gas_reserve, app_.raiden))

    self._startup_hook()

    # wait for interrupt
    event = AsyncResult()

    def sig_set(sig=None, _frame=None):
        event.set(sig)

    gevent.signal(signal.SIGQUIT, sig_set)
    gevent.signal(signal.SIGTERM, sig_set)
    gevent.signal(signal.SIGINT, sig_set)

    # quit if any task exits, successfully or not
    for task in tasks:
        task.link(event)

    try:
        event.get()
        print('Signal received. Shutting down ...')
    except (EthNodeCommunicationError, RequestsConnectionError):
        print(ETHEREUM_NODE_COMMUNICATION_ERROR)
        sys.exit(1)
    except RaidenError as ex:
        click.secho(f'FATAL: {ex}', fg='red')
    except Exception as ex:
        # Unexpected failure: persist the traceback to a temp file so the
        # user can report it, and show where it was written.
        with NamedTemporaryFile(
            'w',
            prefix=f'raiden-exception-{datetime.utcnow():%Y-%m-%dT%H-%M}',
            suffix='.txt',
            delete=False,
        ) as traceback_file:
            traceback.print_exc(file=traceback_file)
            click.secho(
                f'FATAL: An unexpected exception occured. '
                f'A traceback has been written to {traceback_file.name}\n'
                f'{ex}',
                fg='red',
            )
    finally:
        self._shutdown_hook()

        def stop_task(task):
            # Runnables expose stop(); plain greenlets are killed instead.
            try:
                if isinstance(task, Runnable):
                    task.stop()
                else:
                    task.kill()
            finally:
                task.get()  # re-raise

        # stop every task concurrently, bounded by the configured timeout
        gevent.joinall(
            [gevent.spawn(stop_task, task) for task in tasks],
            app_.config.get('shutdown_timeout', settings.DEFAULT_SHUTDOWN_TIMEOUT),
            raise_error=True,
        )

    return app_
class ConnectionManager: # pragma: no unittest """The ConnectionManager provides a high level abstraction for connecting to a Token network. Note: It is initialized with 0 funds; a connection to the token network will be only established _after_ calling `connect(funds)` """ # XXX Hack: for bootstrapping, the first node on a network opens a channel # with this address to become visible. BOOTSTRAP_ADDR_HEX = to_checksum_address("2" * 40) BOOTSTRAP_ADDR = to_canonical_address(BOOTSTRAP_ADDR_HEX) def __init__(self, raiden: "RaidenService", token_network_address: TokenNetworkAddress): self.raiden = raiden chain_state = views.state_from_raiden(raiden) token_network_state = views.get_token_network_by_address( chain_state, token_network_address ) token_network_registry = views.get_token_network_registry_by_token_network_address( chain_state, token_network_address ) assert token_network_state assert token_network_registry # TODO: # - Add timeout for transaction polling, used to overwrite the RaidenAPI # defaults # - Add a proper selection strategy (#576) self.funds = 0 self.initial_channel_target = 0 self.joinable_funds_target = 0.0 self.raiden = raiden self.registry_address = token_network_registry.address self.token_network_address = token_network_address self.token_address = token_network_state.token_address self.lock = Semaphore() #: protects self.funds and self.initial_channel_target self.api = RaidenAPI(raiden) def connect( self, funds: typing.TokenAmount, initial_channel_target: int = 3, joinable_funds_target: float = 0.4, ) -> None: """Connect to the network. Subsequent calls to `connect` are allowed, but will only affect the spendable funds and the connection strategy parameters for the future. `connect` will not close any channels. Note: the ConnectionManager does not discriminate manually opened channels from automatically opened ones. 
If the user manually opened channels, those deposit amounts will affect the funding per channel and the number of new channels opened. Args: funds: Target amount of tokens spendable to join the network. initial_channel_target: Target number of channels to open. joinable_funds_target: Amount of funds not initially assigned. """ token = self.raiden.proxy_manager.token(self.token_address) token_balance = token.balance_of(self.raiden.address) if token_balance < funds: raise InvalidAmount( f"Insufficient balance for token {to_checksum_address(self.token_address)}" ) if funds <= 0: raise InvalidAmount("The funds to use in the connection need to be a positive integer") if joinable_funds_target < 0 or joinable_funds_target > 1: raise InvalidAmount( f"joinable_funds_target should be between 0 and 1. Given: {joinable_funds_target}" ) with self.lock: self.funds = funds self.initial_channel_target = initial_channel_target self.joinable_funds_target = joinable_funds_target log_open_channels(self.raiden, self.registry_address, self.token_address, funds) qty_network_channels = views.count_token_network_channels( views.state_from_raiden(self.raiden), self.registry_address, self.token_address ) if not qty_network_channels: log.info( "Bootstrapping token network.", node=to_checksum_address(self.raiden.address), network_id=to_checksum_address(self.registry_address), token_id=to_checksum_address(self.token_address), ) self.api.channel_open( self.registry_address, self.token_address, self.BOOTSTRAP_ADDR ) else: self._open_channels() def leave(self, registry_address: TokenNetworkRegistryAddress) -> List[NettingChannelState]: """ Leave the token network. This implies closing all channels and waiting for all channels to be settled. 
""" with self.lock: self.initial_channel_target = 0 channels_to_close = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), token_network_registry_address=registry_address, token_address=self.token_address, ) partner_addresses = [ channel_state.partner_state.address for channel_state in channels_to_close ] self.api.channel_batch_close(registry_address, self.token_address, partner_addresses) channel_ids = [channel_state.identifier for channel_state in channels_to_close] waiting.wait_for_settle( self.raiden, registry_address, self.token_address, channel_ids, self.raiden.alarm.sleep_time, ) return channels_to_close def join_channel(self, partner_address: Address, partner_deposit: TokenAmount) -> None: """Will be called, when we were selected as channel partner by another node. It will fund the channel with up to the partners deposit, but not more than remaining funds or the initial funding per channel. If the connection manager has no funds, this is a noop. """ # Consider this race condition: # # - Partner opens the channel and starts the deposit. # - This nodes learns about the new channel, starts ConnectionManager's # retry_connect, which will start a deposit for this half of the # channel. # - This node learns about the partner's deposit before its own. # join_channel is called which will try to deposit again. # # To fix this race, first the node must wait for the pending operations # to finish, because in them could be a deposit, and then deposit must # be called only if the channel is still not funded. 
token_network_proxy = self.raiden.proxy_manager.token_network(self.token_network_address) # Wait for any pending operation in the channel to complete, before # deciding on the deposit with self.lock, token_network_proxy.channel_operations_lock[partner_address]: channel_state = views.get_channelstate_for( chain_state=views.state_from_raiden(self.raiden), token_network_registry_address=self.registry_address, token_address=self.token_address, partner_address=partner_address, ) if not channel_state: return joining_funds = min( partner_deposit, self._funds_remaining, self._initial_funding_per_partner ) if joining_funds <= 0 or self._leaving_state: return if joining_funds <= channel_state.our_state.contract_balance: return try: self.api.set_total_channel_deposit( self.registry_address, self.token_address, partner_address, joining_funds ) except RaidenRecoverableError: log.info( "Channel not in opened state", node=to_checksum_address(self.raiden.address) ) except InvalidDBData: raise except RaidenUnrecoverableError as e: should_crash = ( self.raiden.config["environment_type"] != Environment.PRODUCTION or self.raiden.config["unrecoverable_error_should_crash"] ) if should_crash: raise log.critical(str(e), node=to_checksum_address(self.raiden.address)) else: log.info( "Joined a channel", node=to_checksum_address(self.raiden.address), partner=to_checksum_address(partner_address), funds=joining_funds, ) def retry_connect(self) -> None: """Will be called when new channels in the token network are detected. If the minimum number of channels was not yet established, it will try to open new channels. If the connection manager has no funds, this is a noop. """ with self.lock: if self._funds_remaining > 0 and not self._leaving_state: self._open_channels() def _find_new_partners(self) -> List[Address]: """ Search the token network for potential channel partners. 
""" open_channels = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), token_network_registry_address=self.registry_address, token_address=self.token_address, ) known = set(channel_state.partner_state.address for channel_state in open_channels) known.add(self.BOOTSTRAP_ADDR) known.add(self.raiden.address) participants_addresses = views.get_participants_addresses( views.state_from_raiden(self.raiden), self.registry_address, self.token_address ) available_addresses = list(participants_addresses - known) shuffle(available_addresses) new_partners = available_addresses log.debug( "Found partners", node=to_checksum_address(self.raiden.address), number_of_partners=len(available_addresses), ) return new_partners def _join_partner(self, partner: Address) -> None: """ Ensure a channel exists with partner and is funded in our side """ log.info( "Trying to join or fund channel with partner further", node=to_checksum_address(self.raiden.address), partner=to_checksum_address(partner), ) try: self.api.channel_open(self.registry_address, self.token_address, partner) except DuplicatedChannelError: # If channel already exists (either because partner created it, # or it's nonfunded channel), continue to ensure it's funded pass total_deposit = self._initial_funding_per_partner if total_deposit == 0: return try: self.api.set_total_channel_deposit( registry_address=self.registry_address, token_address=self.token_address, partner_address=partner, total_deposit=total_deposit, ) except InvalidDBData: raise except RECOVERABLE_ERRORS: log.info( "Deposit failed", node=to_checksum_address(self.raiden.address), partner=to_checksum_address(partner), ) except RaidenUnrecoverableError: should_crash = ( self.raiden.config["environment_type"] != Environment.PRODUCTION or self.raiden.config["unrecoverable_error_should_crash"] ) if should_crash: raise log.critical( "Deposit failed", node=to_checksum_address(self.raiden.address), partner=to_checksum_address(partner), ) def 
_open_channels(self) -> bool: """ Open channels until there are `self.initial_channel_target` channels open. Do nothing if there are enough channels open already. Note: - This method must be called with the lock held. Return: - False if no channels could be opened """ open_channels = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), token_network_registry_address=self.registry_address, token_address=self.token_address, ) open_channels = [ channel_state for channel_state in open_channels if channel_state.partner_state.address != self.BOOTSTRAP_ADDR ] funded_channels = [ channel_state for channel_state in open_channels if channel_state.our_state.contract_balance >= self._initial_funding_per_partner ] nonfunded_channels = [ channel_state for channel_state in open_channels if channel_state not in funded_channels ] possible_new_partners = self._find_new_partners() if possible_new_partners == 0: return False # if we already met our target, break if len(funded_channels) >= self.initial_channel_target: return False # if we didn't, but there's no nonfunded channels and no available partners # it means the network is smaller than our target, so we should also break if not nonfunded_channels and possible_new_partners == 0: return False n_to_join = self.initial_channel_target - len(funded_channels) nonfunded_partners = [ channel_state.partner_state.address for channel_state in nonfunded_channels ] # first, fund nonfunded channels, then open and fund with possible_new_partners, # until initial_channel_target of funded channels is met join_partners = (nonfunded_partners + possible_new_partners)[:n_to_join] log.debug( "Spawning greenlets to join partners", node=to_checksum_address(self.raiden.address), num_greenlets=len(join_partners), ) greenlets = set(gevent.spawn(self._join_partner, partner) for partner in join_partners) gevent.joinall(greenlets, raise_error=True) return True @property def _initial_funding_per_partner(self) -> TokenAmount: """The 
calculated funding per partner depending on configuration and overall funding of the ConnectionManager. Note: - This attribute must be accessed with the lock held. """ if self.initial_channel_target: return TokenAmount( int(self.funds * (1 - self.joinable_funds_target) / self.initial_channel_target) ) return TokenAmount(0) @property def _funds_remaining(self) -> TokenAmount: """The remaining funds after subtracting the already deposited amounts. Note: - This attribute must be accessed with the lock held. """ if self.funds > 0: token = self.raiden.proxy_manager.token(self.token_address) token_balance = token.balance_of(self.raiden.address) sum_deposits = views.get_our_deposits_for_token_network( views.state_from_raiden(self.raiden), self.registry_address, self.token_address ) return TokenAmount(min(self.funds - sum_deposits, token_balance)) return TokenAmount(0) @property def _leaving_state(self) -> bool: """True if the node is leaving the token network. Note: - This attribute must be accessed with the lock held. """ return self.initial_channel_target < 1 def __repr__(self) -> str: if self.raiden.wal is None: return ( f"{self.__class__.__name__}(target={self.initial_channel_target} " "WAL not initialized)" ) open_channels = views.get_channelstate_open( chain_state=views.state_from_raiden(self.raiden), token_network_registry_address=self.registry_address, token_address=self.token_address, ) return ( f"{self.__class__.__name__}(target={self.initial_channel_target} " + f"open_channels={len(open_channels)}:{open_channels!r})" )
def test_close_channel_lack_of_balance_proof(raiden_chain, deposit, token_addresses): app0, app1 = raiden_chain token_address = token_addresses[0] token_network_identifier = views.get_token_network_identifier_by_token_address( views.state_from_app(app0), app0.raiden.default_registry.address, token_address, ) token_proxy = app0.raiden.chain.token(token_address) initial_balance0 = token_proxy.balance_of(app0.raiden.address) initial_balance1 = token_proxy.balance_of(app1.raiden.address) amount = 100 identifier = 1 secret = pending_mediated_transfer( raiden_chain, token_network_identifier, amount, identifier, ) # Stop app0 to avoid sending the unlock app0.raiden.transport.stop_and_wait() reveal_secret = RevealSecret( random.randint(0, UINT64_MAX), secret, ) app0.raiden.sign(reveal_secret) message_handler.on_message(app1.raiden, reveal_secret) RaidenAPI(app0.raiden).channel_close( app0.raiden.default_registry.address, token_address, app1.raiden.address, ) channel_state = get_channelstate(app0, app1, token_network_identifier) waiting.wait_for_settle( app0.raiden, app0.raiden.default_registry.address, token_address, [channel_state.identifier], app0.raiden.alarm.sleep_time, ) # wait for the node to call batch unlock with gevent.Timeout(10): wait_for_batch_unlock( app0, token_network_identifier, channel_state.partner_state.address, channel_state.our_state.address, ) expected_balance0 = initial_balance0 + deposit - amount expected_balance1 = initial_balance1 + deposit + amount assert token_proxy.balance_of(app0.raiden.address) == expected_balance0 assert token_proxy.balance_of(app1.raiden.address) == expected_balance1
def test_token_addresses(raiden_network, token_addresses): app = raiden_network[0] api = RaidenAPI(app.raiden) registry_address = app.raiden.default_registry.address assert set(api.get_tokens_list(registry_address)) == set(token_addresses)
class ConsoleTools(object):
    """Convenience wrappers around RaidenAPI for interactive console use.

    Legacy (Python 2 era) variant: addresses are handled as hex strings and
    converted with ``.encode('hex')`` / ``safe_address_decode``.
    """

    def __init__(self, raiden_service, discovery, settle_timeout, reveal_timeout):
        self._chain = raiden_service.chain
        self._raiden = raiden_service
        self._api = RaidenAPI(raiden_service)
        self._discovery = discovery
        self.settle_timeout = settle_timeout
        self.reveal_timeout = reveal_timeout
        # Expose the deposit API call directly for console convenience.
        self.deposit = self._api.deposit

    def create_token(
            self,
            initial_alloc=10 ** 6,
            name='raidentester',
            symbol='RDT',
            decimals=2,
            timeout=60,
            gasprice=default_gasprice,
            auto_register=True):
        """Create a proxy for a new HumanStandardToken (ERC20), that is
        initialized with Args(below). Per default it will be registered with
        'raiden'.

        Args:
            initial_alloc (int): amount of initial tokens.
            name (str): human readable token name.
            symbol (str): token shorthand symbol.
            decimals (int): decimal places.
            timeout (int): timeout in seconds for creation.
            gasprice (int): gasprice for the creation transaction.
            auto_register (boolean): if True(default), automatically register
                the token with raiden.

        Returns:
            token_address_hex: the hex encoded address of the new token/token.
        """
        contract_path = get_contract_path('HumanStandardToken.sol')
        # Deploy a new ERC20 token
        token_proxy = self._chain.client.deploy_solidity_contract(
            self._raiden.address, 'HumanStandardToken',
            compile_file(contract_path),
            dict(),
            (initial_alloc, name, decimals, symbol),
            contract_path=contract_path,
            gasprice=gasprice,
            timeout=timeout)
        token_address_hex = token_proxy.address.encode('hex')
        if auto_register:
            self.register_token(token_address_hex)
        print("Successfully created {}the token '{}'.".format(
            'and registered ' if auto_register else ' ',
            name))
        return token_address_hex

    def register_token(self, token_address_hex):
        """Register a token with the raiden token manager.

        Args:
            token_address_hex (string): a hex encoded token address.

        Returns:
            channel_manager: the channel_manager contract_proxy.
        """
        # Add the ERC20 token to the raiden registry
        token_address = safe_address_decode(token_address_hex)
        self._chain.default_registry.add_token(token_address)

        # Obtain the channel manager for the token
        channel_manager = self._chain.manager_by_token(token_address)

        # Register the channel manager with the raiden registry
        self._raiden.register_channel_manager(channel_manager.address)

        return channel_manager

    def open_channel_with_funding(
            self,
            token_address_hex,
            peer_address_hex,
            amount,
            settle_timeout=None,
            reveal_timeout=None):
        """Convenience method to open a channel.

        Args:
            token_address_hex (str): hex encoded address of the token for the
                channel.
            peer_address_hex (str): hex encoded address of the channel peer.
            amount (int): amount of initial funding of the channel.
            settle_timeout (int): amount of blocks for the settle time (if
                None use app defaults).
            reveal_timeout (int): amount of blocks for the reveal time (if
                None use app defaults).

        Return:
            netting_channel: the (newly opened) netting channel object.
        """
        # Check, if peer is discoverable
        peer_address = safe_address_decode(peer_address_hex)
        token_address = safe_address_decode(token_address_hex)
        try:
            self._discovery.get(peer_address)
        except KeyError:
            print("Error: peer {} not found in discovery".format(
                peer_address_hex))
            return

        self._api.open(
            token_address,
            peer_address,
            settle_timeout=settle_timeout,
            reveal_timeout=reveal_timeout,
        )

        return self._api.deposit(token_address, peer_address, amount)

    def channel_stats_for(self, token_address_hex, peer_address_hex, pretty=False):
        """Collect information about sent and received transfers
        between yourself and your peer for the given token.

        Args:
            token_address_hex (string): hex encoded address of the token
            peer_address_hex (string): hex encoded address of the peer
            pretty (boolean): if True, print a json representation
                instead of returning a dict

        Returns:
            stats (dict): collected stats for the channel or None if pretty
        """
        peer_address = safe_address_decode(peer_address_hex)
        token_address = safe_address_decode(token_address_hex)

        # Get the token
        token = self._chain.token(token_address)

        # Obtain the token manager
        graph = self._raiden.channelgraphs[token_address]
        assert graph

        # Get the channel
        channel = graph.partneraddress_channel[peer_address]
        assert channel

        # Collect data
        stats = dict(
            transfers=dict(
                received=[t.transferred_amount for t in channel.received_transfers],
                sent=[t.transferred_amount for t in channel.sent_transfers],
            ),
            channel=(
                channel
                if not pretty
                else channel.external_state.netting_channel.address.encode('hex')
            ),
            lifecycle=dict(
                opened_at=channel.external_state.opened_block or 'not yet',
                can_transfer=channel.can_transfer,
                closed_at=channel.external_state.closed_block or 'not yet',
                settled_at=channel.external_state.settled_block or 'not yet',
            ),
            funding=channel.external_state.netting_channel.detail(self._raiden.address),
            token=dict(
                our_balance=token.balance_of(self._raiden.address),
                partner_balance=token.balance_of(peer_address),
                name=token.proxy.name(),
                symbol=token.proxy.symbol(),
            ),
        )
        # Addresses in the on-chain detail are binary; hex encode for display.
        stats['funding']['our_address'] = stats['funding']['our_address'].encode('hex')
        stats['funding']['partner_address'] = stats['funding']['partner_address'].encode('hex')
        if not pretty:
            return stats
        else:
            print(json.dumps(stats, indent=2, sort_keys=True))

    def show_events_for(self, token_address_hex, peer_address_hex):
        """Find all EVM-EventLogs for a channel.

        Args:
            token_address_hex (string): hex encoded address of the token
            peer_address_hex (string): hex encoded address of the peer

        Returns:
            events (list)
        """
        token_address = safe_address_decode(token_address_hex)
        peer_address = safe_address_decode(peer_address_hex)

        graph = self._raiden.channelgraphs[token_address]
        assert graph

        channel = graph.partneraddress_channel[peer_address]
        netcontract_address = channel.external_state.netting_channel.address
        assert netcontract_address

        netting_channel = self._chain.netting_channel(netcontract_address)

        return events.netting_channel_events(self._chain.client, netting_channel)

    def wait_for_contract(self, contract_address_hex, timeout=None):
        """Wait until a contract is mined

        Args:
            contract_address_hex (string): hex encoded address of the contract
            timeout (int): time to wait for the contract to get mined

        Returns:
            True if the contract got mined, false otherwise
        """
        contract_address = safe_address_decode(contract_address_hex)
        start_time = time.time()
        result = self._raiden.chain.client.call(
            'eth_getCode',
            contract_address,
            'latest',
        )

        current_time = time.time()
        while result == '0x':
            # BUGFIX: this used to be `start_time + timeout > current_time`,
            # which returned False on the very first iteration (i.e. while the
            # deadline had NOT yet passed), so passing a timeout made the
            # method never actually wait. Only give up once the deadline is
            # exceeded.
            if timeout and start_time + timeout < current_time:
                return False

            result = self._raiden.chain.client.call(
                'eth_getCode',
                contract_address,
                'latest',
            )
            gevent.sleep(0.5)
            current_time = time.time()

        return result != '0x'
def _run_smoketest(): print_step('Starting Raiden') config = deepcopy(App.DEFAULT_CONFIG) if args.get('extra_config', dict()): merge_dict(config, args['extra_config']) del args['extra_config'] args['config'] = config raiden_stdout = StringIO() with contextlib.redirect_stdout(raiden_stdout): try: # invoke the raiden app app = run_app(**args) raiden_api = RaidenAPI(app.raiden) rest_api = RestAPI(raiden_api) api_server = APIServer(rest_api) (api_host, api_port) = split_endpoint(args['api_address']) api_server.start(api_host, api_port) raiden_api.channel_open( registry_address=contract_addresses[ CONTRACT_TOKEN_NETWORK_REGISTRY], token_address=to_canonical_address(token.contract.address), partner_address=to_canonical_address(TEST_PARTNER_ADDRESS), ) raiden_api.set_total_channel_deposit( contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY], to_canonical_address(token.contract.address), to_canonical_address(TEST_PARTNER_ADDRESS), TEST_DEPOSIT_AMOUNT, ) token_addresses = [to_checksum_address(token.contract.address)] success = False print_step('Running smoketest') error = run_smoketests( app.raiden, args['transport'], token_addresses, contract_addresses[CONTRACT_ENDPOINT_REGISTRY], debug=debug, ) if error is not None: append_report('Smoketest assertion error', error) else: success = True finally: app.stop() app.raiden.get() node = ethereum[0] node.send_signal(2) err, out = node.communicate() append_report('Ethereum stdout', out) append_report('Ethereum stderr', err) append_report('Raiden Node stdout', raiden_stdout.getvalue()) if success: print_step(f'Smoketest successful') else: print_step(f'Smoketest had errors', error=True) return success
class ConsoleTools:
    """Convenience wrappers around RaidenAPI for interactive console use."""

    def __init__(self, raiden_service, discovery, settle_timeout):
        self._chain = raiden_service.chain
        self._raiden = raiden_service
        self._api = RaidenAPI(raiden_service)
        self._discovery = discovery
        self.settle_timeout = settle_timeout

    def create_token(
            self,
            registry_address,
            initial_alloc=10 ** 6,
            name='raidentester',
            symbol='RDT',
            decimals=2,
            timeout=60,
            auto_register=True,
    ):
        """ Create a proxy for a new HumanStandardToken (ERC20), that is
        initialized with Args(below). Per default it will be registered with
        'raiden'.

        Args:
            registry_address: the registry to register the token with.
            initial_alloc (int): amount of initial tokens.
            name (str): human readable token name.
            symbol (str): token shorthand symbol.
            decimals (int): decimal places.
            timeout (int): timeout in seconds for creation.
            auto_register (boolean): if True(default), automatically register
                the token with raiden.

        Returns:
            token_address_hex: the hex encoded address of the new token/token.
        """
        contract_path = get_contract_path('HumanStandardToken.sol')
        # Deploy a new ERC20 token
        with gevent.Timeout(timeout):
            token_proxy = self._chain.client.deploy_solidity_contract(
                'HumanStandardToken',
                compile_files_cwd([contract_path]),
                dict(),
                (initial_alloc, name, decimals, symbol),
                contract_path=contract_path,
            )

        token_address_hex = encode_hex(token_proxy.contract_address)
        if auto_register:
            self.register_token(registry_address, token_address_hex)

        print("Successfully created {}the token '{}'.".format(
            'and registered ' if auto_register else ' ',
            name,
        ))
        return token_address_hex

    def register_token(
            self,
            registry_address_hex: typing.AddressHex,
            token_address_hex: typing.AddressHex,
            retry_timeout: typing.NetworkTimeout = DEFAULT_RETRY_TIMEOUT,
    ) -> TokenNetwork:
        """ Register a token with the raiden token manager.

        Args:
            registry_address_hex (string): a hex encoded registry address.
            token_address_hex (string): a hex encoded token address.
            retry_timeout: the retry timeout used while waiting for the
                payment network to appear.

        Returns:
            The token network proxy.
        """
        registry_address = decode_hex(registry_address_hex)
        token_address = decode_hex(token_address_hex)

        registry = self._raiden.chain.token_network_registry(registry_address)
        token_network_address = registry.add_token(token_address)

        # Wait until the node has processed the on-chain registration before
        # handing back the proxy.
        waiting.wait_for_payment_network(
            self._raiden,
            registry.address,
            token_address,
            retry_timeout,
        )

        return self._raiden.chain.token_network(token_network_address)

    def open_channel_with_funding(
            self,
            registry_address_hex,
            token_address_hex,
            peer_address_hex,
            total_deposit,
            settle_timeout=None,
    ):
        """ Convenience method to open a channel.

        Args:
            registry_address_hex (str): hex encoded address of the registry
                for the channel.
            token_address_hex (str): hex encoded address of the token for the
                channel.
            peer_address_hex (str): hex encoded address of the channel peer.
            total_deposit (int): amount of total funding for the channel.
            settle_timeout (int): amount of blocks for the settle time (if
                None use app defaults).

        Return:
            netting_channel: the (newly opened) netting channel object.
        """
        # Check, if peer is discoverable
        registry_address = decode_hex(registry_address_hex)
        peer_address = decode_hex(peer_address_hex)
        token_address = decode_hex(token_address_hex)
        try:
            self._discovery.get(peer_address)
        except KeyError:
            print('Error: peer {} not found in discovery'.format(peer_address_hex))
            return None

        self._api.channel_open(
            registry_address,
            token_address,
            peer_address,
            settle_timeout=settle_timeout,
        )

        return self._api.set_total_channel_deposit(
            registry_address,
            token_address,
            peer_address,
            total_deposit,
        )

    def wait_for_contract(self, contract_address_hex, timeout=None):
        """ Wait until a contract is mined

        Args:
            contract_address_hex (string): hex encoded address of the contract
            timeout (int): time to wait for the contract to get mined

        Returns:
            True if the contract got mined, false otherwise
        """
        contract_address = decode_hex(contract_address_hex)
        start_time = time.time()
        result = self._raiden.chain.client.web3.eth.getCode(
            to_checksum_address(contract_address),
        )

        current_time = time.time()
        while not result:
            # BUGFIX: this used to be `start_time + timeout > current_time`,
            # which returned False on the very first iteration (i.e. while the
            # deadline had NOT yet passed), so passing a timeout made the
            # method never actually wait. Only give up once the deadline is
            # exceeded.
            if timeout and start_time + timeout < current_time:
                return False

            result = self._raiden.chain.client.web3.eth.getCode(
                to_checksum_address(contract_address),
            )
            gevent.sleep(0.5)
            current_time = time.time()

        return len(result) > 0
def test_channel_deposit(raiden_chain, deposit, retry_timeout, token_addresses): app0, app1 = raiden_chain token_address = token_addresses[0] registry_address = app0.raiden.default_registry.address token_network_identifier = views.get_token_network_identifier_by_token_address( views.state_from_app(app0), app0.raiden.default_registry.address, token_address, ) channel0 = get_channelstate(app0, app1, token_network_identifier) channel1 = get_channelstate(app1, app0, token_network_identifier) assert channel0 is None assert channel1 is None RaidenAPI(app0.raiden).channel_open( registry_address, token_address, app1.raiden.address, ) wait_both_channel_open(app0, app1, registry_address, token_address, retry_timeout) assert_synced_channel_state( token_network_identifier, app0, 0, [], app1, 0, [], ) RaidenAPI(app0.raiden).set_total_channel_deposit( registry_address, token_address, app1.raiden.address, deposit, ) wait_both_channel_deposit( app0, app1, registry_address, token_address, deposit, retry_timeout, ) assert_synced_channel_state( token_network_identifier, app0, deposit, [], app1, 0, [], ) RaidenAPI(app1.raiden).set_total_channel_deposit( registry_address, token_address, app0.raiden.address, deposit, ) wait_both_channel_deposit( app1, app0, registry_address, token_address, deposit, retry_timeout, ) assert_synced_channel_state( token_network_identifier, app0, deposit, [], app1, deposit, [], )